OpenWrt
/branches/18.06.1/target/linux/generic/pending-4.9/0931-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch |
---|
@@ -0,0 +1,38 @@ |
From d9c8bc8c1408f3e8529db6e4e04017b4c579c342 Mon Sep 17 00:00:00 2001 |
From: Pawel Dembicki <paweldembicki@gmail.com> |
Date: Sun, 18 Feb 2018 17:08:04 +0100 |
Subject: [PATCH] w1: gpio: fix problem with platfom data in w1-gpio |
In devices where fdt is used, it is impossible to apply platform data |
without a proper fdt node. |
This patch allows the use of platform data in devices with fdt. |
Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com> |
--- |
drivers/w1/masters/w1-gpio.c | 7 +++---- |
1 file changed, 3 insertions(+), 4 deletions(-) |
--- a/drivers/w1/masters/w1-gpio.c |
+++ b/drivers/w1/masters/w1-gpio.c |
@@ -113,17 +113,16 @@ static int w1_gpio_probe_dt(struct platf |
static int w1_gpio_probe(struct platform_device *pdev) |
{ |
struct w1_bus_master *master; |
- struct w1_gpio_platform_data *pdata; |
+ struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev); |
int err; |
- if (of_have_populated_dt()) { |
+ if (of_have_populated_dt() && !pdata) { |
err = w1_gpio_probe_dt(pdev); |
if (err < 0) |
return err; |
+ pdata = dev_get_platdata(&pdev->dev); |
} |
- pdata = dev_get_platdata(&pdev->dev); |
- |
if (!pdata) { |
dev_err(&pdev->dev, "No configuration data\n"); |
return -ENXIO; |
/branches/18.06.1/target/linux/generic/pending-4.9/100-MIPS-fix-cache-flushing-for-highmem-pages.patch |
---|
@@ -0,0 +1,30 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: MIPS: fix cache flushing for highmem pages |
Most cache flush ops were no-ops for highmem pages. This led to nasty |
segfaults and (in the case of page_address(page) == NULL) kernel |
crashes. |
Fix this by always flushing highmem pages using kmap/kunmap_atomic |
around the actual cache flush. This might be a bit inefficient, but at |
least it's stable. |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/arch/mips/mm/cache.c |
+++ b/arch/mips/mm/cache.c |
@@ -115,6 +115,13 @@ void __flush_anon_page(struct page *page |
{ |
unsigned long addr = (unsigned long) page_address(page); |
+ if (PageHighMem(page)) { |
+ addr = (unsigned long)kmap_atomic(page); |
+ flush_data_cache_page(addr); |
+ __kunmap_atomic((void *)addr); |
+ return; |
+ } |
+ |
if (pages_do_alias(addr, vmaddr)) { |
if (page_mapcount(page) && !Page_dcache_dirty(page)) { |
void *kaddr; |
/branches/18.06.1/target/linux/generic/pending-4.9/110-ehci_hcd_ignore_oc.patch |
---|
@@ -0,0 +1,79 @@ |
From: Florian Fainelli <florian@openwrt.org> |
Subject: USB: EHCI: add ignore_oc flag to disable overcurrent checking |
This patch adds an ignore_oc flag which can be set by EHCI controller |
not supporting or wanting to disable overcurrent checking. The EHCI |
platform data in include/linux/usb/ehci_pdriver.h is also augmented to |
take advantage of this new flag. |
Signed-off-by: Florian Fainelli <florian@openwrt.org> |
--- |
drivers/usb/host/ehci-hcd.c | 2 +- |
drivers/usb/host/ehci-hub.c | 4 ++-- |
drivers/usb/host/ehci-platform.c | 1 + |
drivers/usb/host/ehci.h | 1 + |
include/linux/usb/ehci_pdriver.h | 1 + |
5 files changed, 6 insertions(+), 3 deletions(-) |
--- a/drivers/usb/host/ehci-hcd.c |
+++ b/drivers/usb/host/ehci-hcd.c |
@@ -651,7 +651,7 @@ static int ehci_run (struct usb_hcd *hcd |
"USB %x.%x started, EHCI %x.%02x%s\n", |
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), |
temp >> 8, temp & 0xff, |
- ignore_oc ? ", overcurrent ignored" : ""); |
+ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : ""); |
ehci_writel(ehci, INTR_MASK, |
&ehci->regs->intr_enable); /* Turn On Interrupts */ |
--- a/drivers/usb/host/ehci-hub.c |
+++ b/drivers/usb/host/ehci-hub.c |
@@ -638,7 +638,7 @@ ehci_hub_status_data (struct usb_hcd *hc |
* always set, seem to clear PORT_OCC and PORT_CSC when writing to |
* PORT_POWER; that's surprising, but maybe within-spec. |
*/ |
- if (!ignore_oc) |
+ if (!ignore_oc && !ehci->ignore_oc) |
mask = PORT_CSC | PORT_PEC | PORT_OCC; |
else |
mask = PORT_CSC | PORT_PEC; |
@@ -1008,7 +1008,7 @@ int ehci_hub_control( |
if (temp & PORT_PEC) |
status |= USB_PORT_STAT_C_ENABLE << 16; |
- if ((temp & PORT_OCC) && !ignore_oc){ |
+ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){ |
status |= USB_PORT_STAT_C_OVERCURRENT << 16; |
/* |
--- a/drivers/usb/host/ehci-platform.c |
+++ b/drivers/usb/host/ehci-platform.c |
@@ -259,6 +259,8 @@ static int ehci_platform_probe(struct pl |
hcd->has_tt = 1; |
if (pdata->reset_on_resume) |
priv->reset_on_resume = true; |
+ if (pdata->ignore_oc) |
+ ehci->ignore_oc = 1; |
#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO |
if (ehci->big_endian_mmio) { |
--- a/drivers/usb/host/ehci.h |
+++ b/drivers/usb/host/ehci.h |
@@ -230,6 +230,7 @@ struct ehci_hcd { /* one per controlle |
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */ |
unsigned need_oc_pp_cycle:1; /* MPC834X port power */ |
unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ |
+ unsigned ignore_oc:1; |
/* required for usb32 quirk */ |
#define OHCI_CTRL_HCFS (3 << 6) |
--- a/include/linux/usb/ehci_pdriver.h |
+++ b/include/linux/usb/ehci_pdriver.h |
@@ -49,6 +49,7 @@ struct usb_ehci_pdata { |
unsigned no_io_watchdog:1; |
unsigned reset_on_resume:1; |
unsigned dma_mask_64:1; |
+ unsigned ignore_oc:1; |
/* Turn on all power and clocks */ |
int (*power_on)(struct platform_device *pdev); |
/branches/18.06.1/target/linux/generic/pending-4.9/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch |
---|
@@ -0,0 +1,82 @@ |
From: Tobias Wolf <dev-NTEO@vplace.de> |
Subject: mm: Fix alloc_node_mem_map with ARCH_PFN_OFFSET calculation |
An rt288x (ralink) based router (Belkin F5D8235 v1) does not boot with any |
kernel beyond version 4.3 resulting in: |
BUG: Bad page state in process swapper pfn:086ac |
bisect resulted in: |
a1c34a3bf00af2cede839879502e12dc68491ad5 is the first bad commit |
commit a1c34a3bf00af2cede839879502e12dc68491ad5 |
Author: Laura Abbott <laura@labbott.name> |
Date: Thu Nov 5 18:48:46 2015 -0800 |
mm: Don't offset memmap for flatmem |
Srinivas Kandagatla reported bad page messages when trying to remove the |
bottom 2MB on an ARM based IFC6410 board |
BUG: Bad page state in process swapper pfn:fffa8 |
page:ef7fb500 count:0 mapcount:0 mapping: (null) index:0x0 |
flags: 0x96640253(locked|error|dirty|active|arch_1|reclaim|mlocked) |
page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set |
bad because of flags: |
flags: 0x200041(locked|active|mlocked) |
Modules linked in: |
CPU: 0 PID: 0 Comm: swapper Not tainted 3.19.0-rc3-00007-g412f9ba-dirty |
#816 |
Hardware name: Qualcomm (Flattened Device Tree) |
unwind_backtrace |
show_stack |
dump_stack |
bad_page |
free_pages_prepare |
free_hot_cold_page |
__free_pages |
free_highmem_page |
mem_init |
start_kernel |
Disabling lock debugging due to kernel taint |
[...] |
:040000 040000 2de013c372345fd471cd58f0553c9b38b0ef1cc4 |
0a8156f848733dfa21e16c196dfb6c0a76290709 M mm |
This fix for ARM does not account ARCH_PFN_OFFSET for mem_map as later used by |
page_to_pfn anymore. |
The following output was generated with two hacked in printk statements: |
printk("before %p vs. %p or %p\n", mem_map, mem_map - offset, mem_map - |
(pgdat->node_start_pfn - ARCH_PFN_OFFSET)); |
if (page_to_pfn(mem_map) != pgdat->node_start_pfn) |
mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET); |
printk("after %p\n", mem_map); |
Output: |
[ 0.000000] before 8861b280 vs. 8861b280 or 8851b280 |
[ 0.000000] after 8851b280 |
As seen in the first line mem_map with subtraction of offset does not equal the |
mem_map after subtraction of ARCH_PFN_OFFSET. |
After adding the offset of ARCH_PFN_OFFSET as well to mem_map as the |
previously calculated offset is zero for the named platform it is able to boot |
4.4 and 4.9-rc7 again. |
Signed-off-by: Tobias Wolf <dev-NTEO@vplace.de> |
--- |
--- a/mm/page_alloc.c |
+++ b/mm/page_alloc.c |
@@ -5922,7 +5922,7 @@ static void __ref alloc_node_mem_map(str |
mem_map = NODE_DATA(0)->node_mem_map; |
#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) |
if (page_to_pfn(mem_map) != pgdat->node_start_pfn) |
- mem_map -= offset; |
+ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET); |
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
} |
#endif |
/branches/18.06.1/target/linux/generic/pending-4.9/130-add-linux-spidev-compatible-si3210.patch |
---|
@@ -0,0 +1,18 @@ |
From: Giuseppe Lippolis <giu.lippolis@gmail.com> |
Subject: Add the linux,spidev compatible in spidev. Several devices in ramips have this binding in the dts. |
Signed-off-by: Giuseppe Lippolis <giu.lippolis@gmail.com> |
--- |
drivers/spi/spidev.c | 1 + |
1 file changed, 1 insertion(+) |
--- a/drivers/spi/spidev.c |
+++ b/drivers/spi/spidev.c |
@@ -696,6 +696,7 @@ static struct class *spidev_class; |
static const struct of_device_id spidev_dt_ids[] = { |
{ .compatible = "rohm,dh2228fv" }, |
{ .compatible = "lineartechnology,ltc2488" }, |
+ { .compatible = "siliconlabs,si3210" }, |
{}, |
}; |
MODULE_DEVICE_TABLE(of, spidev_dt_ids); |
/branches/18.06.1/target/linux/generic/pending-4.9/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch |
---|
@@ -0,0 +1,20 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: spi: use gpio_set_value_cansleep for setting chipselect GPIO |
Sleeping is safe inside spi_transfer_one_message, and some GPIO chips |
need to sleep for setting values |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/drivers/spi/spi.c |
+++ b/drivers/spi/spi.c |
@@ -700,7 +700,7 @@ static void spi_set_cs(struct spi_device |
enable = !enable; |
if (gpio_is_valid(spi->cs_gpio)) |
- gpio_set_value(spi->cs_gpio, !enable); |
+ gpio_set_value_cansleep(spi->cs_gpio, !enable); |
else if (spi->master->set_cs) |
spi->master->set_cs(spi, !enable); |
} |
/branches/18.06.1/target/linux/generic/pending-4.9/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch |
---|
@@ -0,0 +1,62 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: jffs2: use .rename2 and add RENAME_WHITEOUT support |
It is required for renames on overlayfs |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/fs/jffs2/dir.c |
+++ b/fs/jffs2/dir.c |
@@ -752,6 +752,24 @@ static int jffs2_mknod (struct inode *di |
return ret; |
} |
+static int jffs2_whiteout (struct inode *old_dir, struct dentry *old_dentry) |
+{ |
+ struct dentry *wh; |
+ int err; |
+ |
+ wh = d_alloc(old_dentry->d_parent, &old_dentry->d_name); |
+ if (!wh) |
+ return -ENOMEM; |
+ |
+ err = jffs2_mknod(old_dir, wh, S_IFCHR | WHITEOUT_MODE, |
+ WHITEOUT_DEV); |
+ if (err) |
+ return err; |
+ |
+ d_rehash(wh); |
+ return 0; |
+} |
+ |
static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, |
struct inode *new_dir_i, struct dentry *new_dentry, |
unsigned int flags) |
@@ -762,7 +780,7 @@ static int jffs2_rename (struct inode *o |
uint8_t type; |
uint32_t now; |
- if (flags & ~RENAME_NOREPLACE) |
+ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT)) |
return -EINVAL; |
/* The VFS will check for us and prevent trying to rename a |
@@ -828,9 +846,14 @@ static int jffs2_rename (struct inode *o |
if (d_is_dir(old_dentry) && !victim_f) |
inc_nlink(new_dir_i); |
- /* Unlink the original */ |
- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), |
- old_dentry->d_name.name, old_dentry->d_name.len, NULL, now); |
+ if (flags & RENAME_WHITEOUT) |
+ /* Replace with whiteout */ |
+ ret = jffs2_whiteout(old_dir_i, old_dentry); |
+ else |
+ /* Unlink the original */ |
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), |
+ old_dentry->d_name.name, |
+ old_dentry->d_name.len, NULL, now); |
/* We don't touch inode->i_nlink */ |
/branches/18.06.1/target/linux/generic/pending-4.9/141-jffs2-add-RENAME_EXCHANGE-support.patch |
---|
@@ -0,0 +1,73 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: jffs2: add RENAME_EXCHANGE support |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/fs/jffs2/dir.c |
+++ b/fs/jffs2/dir.c |
@@ -777,18 +777,31 @@ static int jffs2_rename (struct inode *o |
int ret; |
struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); |
struct jffs2_inode_info *victim_f = NULL; |
+ struct inode *fst_inode = d_inode(old_dentry); |
+ struct inode *snd_inode = d_inode(new_dentry); |
uint8_t type; |
uint32_t now; |
- if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT)) |
+ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT|RENAME_EXCHANGE)) |
return -EINVAL; |
+ if ((flags & RENAME_EXCHANGE) && (old_dir_i != new_dir_i)) { |
+ if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) { |
+ inc_nlink(new_dir_i); |
+ drop_nlink(old_dir_i); |
+ } |
+ else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) { |
+ drop_nlink(new_dir_i); |
+ inc_nlink(old_dir_i); |
+ } |
+ } |
+ |
/* The VFS will check for us and prevent trying to rename a |
* file over a directory and vice versa, but if it's a directory, |
* the VFS can't check whether the victim is empty. The filesystem |
* needs to do that for itself. |
*/ |
- if (d_really_is_positive(new_dentry)) { |
+ if (d_really_is_positive(new_dentry) && !(flags & RENAME_EXCHANGE)) { |
victim_f = JFFS2_INODE_INFO(d_inode(new_dentry)); |
if (d_is_dir(new_dentry)) { |
struct jffs2_full_dirent *fd; |
@@ -823,7 +836,7 @@ static int jffs2_rename (struct inode *o |
if (ret) |
return ret; |
- if (victim_f) { |
+ if (victim_f && !(flags & RENAME_EXCHANGE)) { |
/* There was a victim. Kill it off nicely */ |
if (d_is_dir(new_dentry)) |
clear_nlink(d_inode(new_dentry)); |
@@ -849,6 +862,12 @@ static int jffs2_rename (struct inode *o |
if (flags & RENAME_WHITEOUT) |
/* Replace with whiteout */ |
ret = jffs2_whiteout(old_dir_i, old_dentry); |
+ else if (flags & RENAME_EXCHANGE) |
+ /* Replace the original */ |
+ ret = jffs2_do_link(c, JFFS2_INODE_INFO(old_dir_i), |
+ d_inode(new_dentry)->i_ino, type, |
+ old_dentry->d_name.name, old_dentry->d_name.len, |
+ now); |
else |
/* Unlink the original */ |
ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), |
@@ -880,7 +899,7 @@ static int jffs2_rename (struct inode *o |
return ret; |
} |
- if (d_is_dir(old_dentry)) |
+ if (d_is_dir(old_dentry) && !(flags & RENAME_EXCHANGE)) |
drop_nlink(old_dir_i); |
new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); |
/branches/18.06.1/target/linux/generic/pending-4.9/150-bridge_allow_receiption_on_disabled_port.patch |
---|
@@ -0,0 +1,43 @@ |
From: Stephen Hemminger <stephen@networkplumber.org> |
Subject: bridge: allow receiption on disabled port |
When an ethernet device is enslaved to a bridge, and the bridge STP |
detects loss of carrier (or operational state down), then normally |
packet reception is blocked. |
This breaks control applications like WPA which may be expecting to |
receive packets to negotiate to bring link up. The bridge needs to |
block forwarding packets from these disabled ports, but there is no |
hard requirement to not allow local packet delivery. |
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org> |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- a/net/bridge/br_input.c |
+++ b/net/bridge/br_input.c |
@@ -233,7 +233,8 @@ static int br_handle_local_finish(struct |
{ |
struct net_bridge_port *p = br_port_get_rcu(skb->dev); |
- __br_handle_local_finish(skb); |
+ if (p->state != BR_STATE_DISABLED) |
+ __br_handle_local_finish(skb); |
BR_INPUT_SKB_CB(skb)->brdev = p->br->dev; |
br_pass_frame_up(skb); |
@@ -316,6 +317,15 @@ rx_handler_result_t br_handle_frame(stru |
forward: |
switch (p->state) { |
+ case BR_STATE_DISABLED: |
+ if (ether_addr_equal(p->br->dev->dev_addr, dest)) |
+ skb->pkt_type = PACKET_HOST; |
+ |
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, |
+ dev_net(skb->dev), NULL, skb, skb->dev, NULL, |
+ br_handle_local_finish); |
+ break; |
+ |
case BR_STATE_FORWARDING: |
rhook = rcu_dereference(br_should_route_hook); |
if (rhook) { |
/branches/18.06.1/target/linux/generic/pending-4.9/161-mtd-part-add-generic-parsing-of-linux-part-probe.patch |
---|
@@ -0,0 +1,181 @@ |
From: Hauke Mehrtens <hauke@hauke-m.de> |
Subject: mtd: part: add generic parsing of linux,part-probe |
This moves the linux,part-probe device tree parsing code from |
physmap_of.c to mtdpart.c. Now all drivers can use this feature by just |
providing a reference to their device tree node in struct |
mtd_part_parser_data. |
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> |
--- |
Documentation/devicetree/bindings/mtd/nand.txt | 16 +++++++++ |
drivers/mtd/maps/physmap_of.c | 46 +------------------------- |
drivers/mtd/mtdpart.c | 45 +++++++++++++++++++++++++ |
3 files changed, 62 insertions(+), 45 deletions(-) |
--- a/Documentation/devicetree/bindings/mtd/nand.txt |
+++ b/Documentation/devicetree/bindings/mtd/nand.txt |
@@ -44,6 +44,22 @@ Optional NAND chip properties: |
used by the upper layers, and you want to make your NAND |
as reliable as possible. |
+- linux,part-probe: list of name as strings of the partition parser |
+ which should be used to parse the partition table. |
+ They will be tried in the specified ordering and |
+ the next one will be used if the previous one |
+ failed. |
+ |
+ Example: linux,part-probe = "cmdlinepart", "ofpart"; |
+ |
+ This is also the default value, which will be used |
+ if this attribute is not specified. It could be |
+ that the flash driver in use overwrote the default |
+ value and uses some other default. |
+ |
+ Possible values are: bcm47xxpart, afs, ar7part, |
+ ofoldpart, ofpart, bcm63xxpart, RedBoot, cmdlinepart |
+ |
The ECC strength and ECC step size properties define the correction capability |
of a controller. Together, they say a controller can correct "{strength} bit |
errors per {size} bytes". |
--- a/drivers/mtd/maps/physmap_of.c |
+++ b/drivers/mtd/maps/physmap_of.c |
@@ -113,47 +113,9 @@ static struct mtd_info *obsolete_probe(s |
static const char * const part_probe_types_def[] = { |
"cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL }; |
-static const char * const *of_get_probes(struct device_node *dp) |
-{ |
- const char *cp; |
- int cplen; |
- unsigned int l; |
- unsigned int count; |
- const char **res; |
- |
- cp = of_get_property(dp, "linux,part-probe", &cplen); |
- if (cp == NULL) |
- return part_probe_types_def; |
- |
- count = 0; |
- for (l = 0; l != cplen; l++) |
- if (cp[l] == 0) |
- count++; |
- |
- res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL); |
- if (!res) |
- return NULL; |
- count = 0; |
- while (cplen > 0) { |
- res[count] = cp; |
- l = strlen(cp) + 1; |
- cp += l; |
- cplen -= l; |
- count++; |
- } |
- return res; |
-} |
- |
-static void of_free_probes(const char * const *probes) |
-{ |
- if (probes != part_probe_types_def) |
- kfree(probes); |
-} |
- |
static const struct of_device_id of_flash_match[]; |
static int of_flash_probe(struct platform_device *dev) |
{ |
- const char * const *part_probe_types; |
const struct of_device_id *match; |
struct device_node *dp = dev->dev.of_node; |
struct resource res; |
@@ -317,14 +279,8 @@ static int of_flash_probe(struct platfor |
info->cmtd->dev.parent = &dev->dev; |
mtd_set_of_node(info->cmtd, dp); |
- part_probe_types = of_get_probes(dp); |
- if (!part_probe_types) { |
- err = -ENOMEM; |
- goto err_out; |
- } |
- mtd_device_parse_register(info->cmtd, part_probe_types, NULL, |
+ mtd_device_parse_register(info->cmtd, part_probe_types_def, NULL, |
NULL, 0); |
- of_free_probes(part_probe_types); |
kfree(mtd_list); |
--- a/drivers/mtd/mtdpart.c |
+++ b/drivers/mtd/mtdpart.c |
@@ -29,6 +29,7 @@ |
#include <linux/kmod.h> |
#include <linux/mtd/mtd.h> |
#include <linux/mtd/partitions.h> |
+#include <linux/of.h> |
#include <linux/err.h> |
#include <linux/of.h> |
@@ -827,6 +828,42 @@ void deregister_mtd_parser(struct mtd_pa |
EXPORT_SYMBOL_GPL(deregister_mtd_parser); |
/* |
+ * Parses the linux,part-probe device tree property. |
+ * When a non null value is returned it has to be freed with kfree() by |
+ * the caller. |
+ */ |
+static const char * const *of_get_probes(struct device_node *dp) |
+{ |
+ const char *cp; |
+ int cplen; |
+ unsigned int l; |
+ unsigned int count; |
+ const char **res; |
+ |
+ cp = of_get_property(dp, "linux,part-probe", &cplen); |
+ if (cp == NULL) |
+ return NULL; |
+ |
+ count = 0; |
+ for (l = 0; l != cplen; l++) |
+ if (cp[l] == 0) |
+ count++; |
+ |
+ res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL); |
+ if (!res) |
+ return NULL; |
+ count = 0; |
+ while (cplen > 0) { |
+ res[count] = cp; |
+ l = strlen(cp) + 1; |
+ cp += l; |
+ cplen -= l; |
+ count++; |
+ } |
+ return res; |
+} |
+ |
+/* |
* Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you |
* are changing this array! |
*/ |
@@ -975,6 +1012,13 @@ int parse_mtd_partitions(struct mtd_info |
struct mtd_partitions pparts = { }; |
struct mtd_part_parser *parser; |
int ret, err = 0; |
+ const char *const *types_of = NULL; |
+ |
+ if (mtd_get_of_node(master)) { |
+ types_of = of_get_probes(mtd_get_of_node(master)); |
+ if (types_of != NULL) |
+ types = types_of; |
+ } |
if (!types) |
types = mtd_is_partition(master) ? default_subpartition_types : |
@@ -1016,6 +1060,7 @@ int parse_mtd_partitions(struct mtd_info |
if (ret < 0 && !err) |
err = ret; |
} |
+ kfree(types_of); |
return err; |
} |
/branches/18.06.1/target/linux/generic/pending-4.9/170-MIPS-PCI-add-controllers-before-the-specified-head.patch |
---|
@@ -0,0 +1,30 @@ |
From: Mathias Kresin <dev@kresin.me> |
Subject: MIPS: PCI: add controllers before the specified head |
With commit 23dac14d058f ("MIPS: PCI: Use struct list_head lists") new |
controllers are added after the specified head where they were added |
before the specified head previously. |
Use list_add_tail to restore the former order. |
This patches fixes the following PCI error on lantiq: |
pci 0000:01:00.0: BAR 0: error updating (0x1c000004 != 0x000000) |
Fixes: 23dac14d058f ("MIPS: PCI: Use struct list_head lists") |
Signed-off-by: Mathias Kresin <dev@kresin.me> |
--- |
arch/mips/pci/pci-legacy.c | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/arch/mips/pci/pci-legacy.c |
+++ b/arch/mips/pci/pci-legacy.c |
@@ -194,7 +194,7 @@ void register_pci_controller(struct pci_ |
} |
INIT_LIST_HEAD(&hose->list); |
- list_add(&hose->list, &controllers); |
+ list_add_tail(&hose->list, &controllers); |
/* |
* Do not panic here but later - this might happen before console init. |
/branches/18.06.1/target/linux/generic/pending-4.9/180-net-phy-at803x-add-support-for-AT8032.patch |
---|
@@ -0,0 +1,70 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: net: phy: at803x: add support for AT8032 |
Like AT8030, this PHY needs the GPIO reset workaround |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/drivers/net/phy/at803x.c |
+++ b/drivers/net/phy/at803x.c |
@@ -62,6 +62,7 @@ |
#define ATH8030_PHY_ID 0x004dd076 |
#define ATH8031_PHY_ID 0x004dd074 |
+#define ATH8032_PHY_ID 0x004dd023 |
#define ATH8035_PHY_ID 0x004dd072 |
MODULE_DESCRIPTION("Atheros 803x PHY driver"); |
@@ -259,7 +260,8 @@ static int at803x_probe(struct phy_devic |
if (!priv) |
return -ENOMEM; |
- if (phydev->drv->phy_id != ATH8030_PHY_ID) |
+ if (phydev->drv->phy_id != ATH8030_PHY_ID && |
+ phydev->drv->phy_id != ATH8032_PHY_ID) |
goto does_not_require_reset_workaround; |
gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); |
@@ -335,7 +337,7 @@ static void at803x_link_change_notify(st |
struct at803x_priv *priv = phydev->priv; |
/* |
- * Conduct a hardware reset for AT8030 every time a link loss is |
+ * Conduct a hardware reset for AT8030/2 every time a link loss is |
* signalled. This is necessary to circumvent a hardware bug that |
* occurs when the cable is unplugged while TX packets are pending |
* in the FIFO. In such cases, the FIFO enters an error mode it |
@@ -447,6 +449,24 @@ static struct phy_driver at803x_driver[] |
.aneg_done = at803x_aneg_done, |
.ack_interrupt = &at803x_ack_interrupt, |
.config_intr = &at803x_config_intr, |
+}, { |
+ /* ATHEROS 8032 */ |
+ .phy_id = ATH8032_PHY_ID, |
+ .name = "Atheros 8032 ethernet", |
+ .phy_id_mask = 0xffffffef, |
+ .probe = at803x_probe, |
+ .config_init = at803x_config_init, |
+ .link_change_notify = at803x_link_change_notify, |
+ .set_wol = at803x_set_wol, |
+ .get_wol = at803x_get_wol, |
+ .suspend = at803x_suspend, |
+ .resume = at803x_resume, |
+ .features = PHY_BASIC_FEATURES, |
+ .flags = PHY_HAS_INTERRUPT, |
+ .config_aneg = genphy_config_aneg, |
+ .read_status = genphy_read_status, |
+ .ack_interrupt = at803x_ack_interrupt, |
+ .config_intr = at803x_config_intr, |
} }; |
module_phy_driver(at803x_driver); |
@@ -454,6 +474,7 @@ module_phy_driver(at803x_driver); |
static struct mdio_device_id __maybe_unused atheros_tbl[] = { |
{ ATH8030_PHY_ID, 0xffffffef }, |
{ ATH8031_PHY_ID, 0xffffffef }, |
+ { ATH8032_PHY_ID, 0xffffffef }, |
{ ATH8035_PHY_ID, 0xffffffef }, |
{ } |
}; |
/branches/18.06.1/target/linux/generic/pending-4.9/190-2-5-e1000e-Fix-wrong-comment-related-to-link-detection.patch |
---|
@@ -0,0 +1,43 @@ |
From patchwork Fri Jul 21 18:36:24 2017 |
Content-Type: text/plain; charset="utf-8" |
MIME-Version: 1.0 |
Content-Transfer-Encoding: 7bit |
Subject: [2/5] e1000e: Fix wrong comment related to link detection |
From: Benjamin Poirier <bpoirier@suse.com> |
X-Patchwork-Id: 9857489 |
Message-Id: <20170721183627.13373-2-bpoirier@suse.com> |
To: Jeff Kirsher <jeffrey.t.kirsher@intel.com> |
Cc: Lennart Sorensen <lsorense@csclub.uwaterloo.ca>, |
intel-wired-lan@lists.osuosl.org, netdev@vger.kernel.org, |
linux-kernel@vger.kernel.org |
Date: Fri, 21 Jul 2017 11:36:24 -0700 |
Reading e1000e_check_for_copper_link() shows that get_link_status is set to |
false after link has been detected. Therefore, it stays TRUE until then. |
Signed-off-by: Benjamin Poirier <bpoirier@suse.com> |
Tested-by: Aaron Brown <aaron.f.brown@intel.com> |
--- |
drivers/net/ethernet/intel/e1000e/netdev.c | 4 ++-- |
1 file changed, 2 insertions(+), 2 deletions(-) |
--- a/drivers/net/ethernet/intel/e1000e/netdev.c |
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c |
@@ -5067,7 +5067,7 @@ static bool e1000e_has_link(struct e1000 |
/* get_link_status is set on LSC (link status) interrupt or |
* Rx sequence error interrupt. get_link_status will stay |
- * false until the check_for_link establishes link |
+ * true until the check_for_link establishes link |
* for copper adapters ONLY |
*/ |
switch (hw->phy.media_type) { |
@@ -5085,7 +5085,7 @@ static bool e1000e_has_link(struct e1000 |
break; |
case e1000_media_type_internal_serdes: |
ret_val = hw->mac.ops.check_for_link(hw); |
- link_active = adapter->hw.mac.serdes_has_link; |
+ link_active = hw->mac.serdes_has_link; |
break; |
default: |
case e1000_media_type_unknown: |
/branches/18.06.1/target/linux/generic/pending-4.9/201-extra_optimization.patch |
---|
@@ -0,0 +1,32 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: Upgrade to Linux 2.6.19 |
- Includes large parts of the patch from #1021 by dpalffy |
- Includes RB532 NAND driver changes by n0-1 |
[john@phrozen.org: felix will add this to his upstream queue] |
lede-commit: bff468813f78f81e36ebb2a3f4354de7365e640f |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
Makefile | 6 +++--- |
1 file changed, 3 insertions(+), 3 deletions(-) |
--- a/Makefile |
+++ b/Makefile |
@@ -680,12 +680,12 @@ endif |
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE |
KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) |
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) |
+KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION) |
else |
ifdef CONFIG_PROFILE_ALL_BRANCHES |
-KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) |
+KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION) |
else |
-KBUILD_CFLAGS += -O2 |
+KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION) |
endif |
endif |
/branches/18.06.1/target/linux/generic/pending-4.9/203-kallsyms_uncompressed.patch |
---|
@@ -0,0 +1,119 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: add a config option for keeping the kallsyms table uncompressed, saving ~9kb kernel size after lzma on ar71xx |
[john@phrozen.org: added to my upstream queue 30.12.2016] |
lede-commit: e0e3509b5ce2ccf93d4d67ea907613f5f7ec2eed |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
init/Kconfig | 11 +++++++++++ |
kernel/kallsyms.c | 8 ++++++++ |
scripts/kallsyms.c | 12 ++++++++++++ |
scripts/link-vmlinux.sh | 4 ++++ |
4 files changed, 35 insertions(+) |
--- a/init/Kconfig |
+++ b/init/Kconfig |
@@ -1370,6 +1370,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW |
the unaligned access emulation. |
see arch/parisc/kernel/unaligned.c for reference |
+config KALLSYMS_UNCOMPRESSED |
+ bool "Keep kallsyms uncompressed" |
+ depends on KALLSYMS |
+ help |
+ Normally kallsyms contains compressed symbols (using a token table), |
+ reducing the uncompressed kernel image size. Keeping the symbol table |
+ uncompressed significantly improves the size of this part in compressed |
+ kernel images. |
+ |
+ Say N unless you need compressed kernel images to be small. |
+ |
config HAVE_PCSPKR_PLATFORM |
bool |
--- a/kernel/kallsyms.c |
+++ b/kernel/kallsyms.c |
@@ -113,6 +113,11 @@ static unsigned int kallsyms_expand_symb |
* For every byte on the compressed symbol data, copy the table |
* entry for that byte. |
*/ |
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED |
+ memcpy(result, data + 1, len - 1); |
+ result += len - 1; |
+ len = 0; |
+#endif |
while (len) { |
tptr = &kallsyms_token_table[kallsyms_token_index[*data]]; |
data++; |
@@ -145,6 +150,9 @@ tail: |
*/ |
static char kallsyms_get_symbol_type(unsigned int off) |
{ |
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED |
+ return kallsyms_names[off + 1]; |
+#endif |
/* |
* Get just the first code, look it up in the token table, |
* and return the first char from this token. |
--- a/scripts/kallsyms.c |
+++ b/scripts/kallsyms.c |
@@ -61,6 +61,7 @@ static struct addr_range percpu_range = |
static struct sym_entry *table; |
static unsigned int table_size, table_cnt; |
static int all_symbols = 0; |
+static int uncompressed = 0; |
static int absolute_percpu = 0; |
static char symbol_prefix_char = '\0'; |
static int base_relative = 0; |
@@ -446,6 +447,9 @@ static void write_src(void) |
free(markers); |
+ if (uncompressed) |
+ return; |
+ |
output_label("kallsyms_token_table"); |
off = 0; |
for (i = 0; i < 256; i++) { |
@@ -504,6 +508,9 @@ static void *find_token(unsigned char *s |
{ |
int i; |
+ if (uncompressed) |
+ return NULL; |
+ |
for (i = 0; i < len - 1; i++) { |
if (str[i] == token[0] && str[i+1] == token[1]) |
return &str[i]; |
@@ -576,6 +583,9 @@ static void optimize_result(void) |
{ |
int i, best; |
+ if (uncompressed) |
+ return; |
+ |
/* using the '\0' symbol last allows compress_symbols to use standard |
* fast string functions */ |
for (i = 255; i >= 0; i--) { |
@@ -764,6 +774,8 @@ int main(int argc, char **argv) |
symbol_prefix_char = *p; |
} else if (strcmp(argv[i], "--base-relative") == 0) |
base_relative = 1; |
+ else if (strcmp(argv[i], "--uncompressed") == 0) |
+ uncompressed = 1; |
else |
usage(); |
} |
--- a/scripts/link-vmlinux.sh |
+++ b/scripts/link-vmlinux.sh |
@@ -136,6 +136,10 @@ kallsyms() |
kallsymopt="${kallsymopt} --base-relative" |
fi |
+ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then |
+ kallsymopt="${kallsymopt} --uncompressed" |
+ fi |
+ |
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ |
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" |
/branches/18.06.1/target/linux/generic/pending-4.9/205-backtrace_module_info.patch |
---|
@@ -0,0 +1,45 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: when KALLSYMS is disabled, print module address + size for matching backtrace entries |
[john@phrozen.org: felix will add this to his upstream queue] |
lede-commit 53827cdc824556cda910b23ce5030c363b8f1461 |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
lib/vsprintf.c | 15 +++++++++++---- |
1 file changed, 11 insertions(+), 4 deletions(-) |
--- a/lib/vsprintf.c |
+++ b/lib/vsprintf.c |
@@ -669,8 +669,10 @@ char *symbol_string(char *buf, char *end |
struct printf_spec spec, const char *fmt) |
{ |
unsigned long value; |
-#ifdef CONFIG_KALLSYMS |
char sym[KSYM_SYMBOL_LEN]; |
+#ifndef CONFIG_KALLSYMS |
+ struct module *mod; |
+ int len; |
#endif |
if (fmt[1] == 'R') |
@@ -684,11 +686,16 @@ char *symbol_string(char *buf, char *end |
sprint_symbol(sym, value); |
else |
sprint_symbol_no_offset(sym, value); |
- |
- return string(buf, end, sym, spec); |
#else |
- return special_hex_number(buf, end, value, sizeof(void *)); |
+ len = snprintf(sym, sizeof(sym), "0x%lx", value); |
+ |
+ mod = __module_address(value); |
+ if (mod) |
+ snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]", |
+ mod->name, mod->core_layout.base, |
+ mod->core_layout.size); |
#endif |
+ return string(buf, end, sym, spec); |
} |
static noinline_for_stack |
/branches/18.06.1/target/linux/generic/pending-4.9/220-optimize_inlining.patch |
---|
@@ -0,0 +1,70 @@ |
--- a/arch/x86/Kconfig.debug |
+++ b/arch/x86/Kconfig.debug |
@@ -287,20 +287,6 @@ config CPA_DEBUG |
---help--- |
Do change_page_attr() self-tests every 30 seconds. |
-config OPTIMIZE_INLINING |
- bool "Allow gcc to uninline functions marked 'inline'" |
- ---help--- |
- This option determines if the kernel forces gcc to inline the functions |
- developers have marked 'inline'. Doing so takes away freedom from gcc to |
- do what it thinks is best, which is desirable for the gcc 3.x series of |
- compilers. The gcc 4.x series have a rewritten inlining algorithm and |
- enabling this option will generate a smaller kernel there. Hopefully |
- this algorithm is so good that allowing gcc 4.x and above to make the |
- decision will become the default in the future. Until then this option |
- is there to test gcc for this. |
- |
- If unsure, say N. |
- |
config DEBUG_ENTRY |
bool "Debug low-level entry code" |
depends on DEBUG_KERNEL |
--- a/lib/Kconfig.debug |
+++ b/lib/Kconfig.debug |
@@ -117,6 +117,20 @@ endmenu # "printk and dmesg options" |
menu "Compile-time checks and compiler options" |
+config OPTIMIZE_INLINING |
+ bool "Allow gcc to uninline functions marked 'inline'" |
+ ---help--- |
+ This option determines if the kernel forces gcc to inline the functions |
+ developers have marked 'inline'. Doing so takes away freedom from gcc to |
+ do what it thinks is best, which is desirable for the gcc 3.x series of |
+ compilers. The gcc 4.x series have a rewritten inlining algorithm and |
+ enabling this option will generate a smaller kernel there. Hopefully |
+ this algorithm is so good that allowing gcc 4.x and above to make the |
+ decision will become the default in the future. Until then this option |
+ is there to test gcc for this. |
+ |
+ If unsure, say N. |
+ |
config DEBUG_INFO |
bool "Compile the kernel with debug info" |
depends on DEBUG_KERNEL && !COMPILE_TEST |
--- a/arch/x86/Kconfig |
+++ b/arch/x86/Kconfig |
@@ -273,9 +273,6 @@ config ZONE_DMA32 |
config AUDIT_ARCH |
def_bool y if X86_64 |
-config ARCH_SUPPORTS_OPTIMIZED_INLINING |
- def_bool y |
- |
config ARCH_SUPPORTS_DEBUG_PAGEALLOC |
def_bool y |
--- a/include/linux/compiler-gcc.h |
+++ b/include/linux/compiler-gcc.h |
@@ -89,8 +89,7 @@ |
* of extern inline functions at link time. |
* A lot of inline functions can cause havoc with function tracing. |
*/ |
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ |
- !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
+#if !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
#define inline \ |
inline __attribute__((always_inline, unused)) notrace __gnu_inline |
#else |
/branches/18.06.1/target/linux/generic/pending-4.9/240-remove-unsane-filenames-from-deps_initramfs-list.patch |
---|
@@ -0,0 +1,47 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: usr: sanitize deps_initramfs list |
If any filename in the intramfs dependency |
list contains a colon, that causes a kernel |
build error like this: |
/devel/openwrt/build_dir/linux-ar71xx_generic/linux-3.6.6/usr/Makefile:58: *** multiple target patterns. Stop. |
make[5]: *** [usr] Error 2 |
Fix it by removing such filenames from the |
deps_initramfs list. |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
usr/Makefile | 8 +++++--- |
1 file changed, 5 insertions(+), 3 deletions(-) |
--- a/usr/Makefile |
+++ b/usr/Makefile |
@@ -53,6 +53,8 @@ ifneq ($(wildcard $(obj)/.initramfs_data |
include $(obj)/.initramfs_data.cpio.d |
endif |
+deps_initramfs_sane := $(foreach v,$(deps_initramfs),$(if $(findstring :,$(v)),,$(v))) |
+ |
quiet_cmd_initfs = GEN $@ |
cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) |
@@ -61,14 +63,14 @@ targets := initramfs_data.cpio.gz initra |
initramfs_data.cpio.lzo initramfs_data.cpio.lz4 \ |
initramfs_data.cpio |
# do not try to update files included in initramfs |
-$(deps_initramfs): ; |
+$(deps_initramfs_sane): ; |
-$(deps_initramfs): klibcdirs |
+$(deps_initramfs_sane): klibcdirs |
# We rebuild initramfs_data.cpio if: |
# 1) Any included file is newer then initramfs_data.cpio |
# 2) There are changes in which files are included (added or deleted) |
# 3) If gen_init_cpio are newer than initramfs_data.cpio |
# 4) arguments to gen_initramfs.sh changes |
-$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs |
+$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs_sane) klibcdirs |
$(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d |
$(call if_changed,initfs) |
/branches/18.06.1/target/linux/generic/pending-4.9/261-enable_wilink_platform_without_drivers.patch |
---|
@@ -0,0 +1,20 @@ |
From: Imre Kaloz <kaloz@openwrt.org> |
Subject: [PATCH] hack: net: wireless: make the wl12xx glue code available with |
compat-wireless, too |
Signed-off-by: Imre Kaloz <kaloz@openwrt.org> |
--- |
drivers/net/wireless/ti/Kconfig | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/drivers/net/wireless/ti/Kconfig |
+++ b/drivers/net/wireless/ti/Kconfig |
@@ -19,7 +19,7 @@ source "drivers/net/wireless/ti/wlcore/K |
config WILINK_PLATFORM_DATA |
bool "TI WiLink platform data" |
- depends on WLCORE_SDIO || WL1251_SDIO |
+ depends on WLCORE_SDIO || WL1251_SDIO || ARCH_OMAP2PLUS |
default y |
---help--- |
Small platform data bit needed to pass data to the sdio modules. |
/branches/18.06.1/target/linux/generic/pending-4.9/300-mips_expose_boot_raw.patch |
---|
@@ -0,0 +1,40 @@ |
From: Mark Miller <mark@mirell.org> |
Subject: mips: expose CONFIG_BOOT_RAW |
This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on |
certain Broadcom chipsets running CFE in order to load the kernel. |
Signed-off-by: Mark Miller <mark@mirell.org> |
Acked-by: Rob Landley <rob@landley.net> |
--- |
--- a/arch/mips/Kconfig |
+++ b/arch/mips/Kconfig |
@@ -1068,9 +1068,6 @@ config FW_ARC |
config ARCH_MAY_HAVE_PC_FDC |
bool |
-config BOOT_RAW |
- bool |
- |
config CEVT_BCM1480 |
bool |
@@ -2967,6 +2964,18 @@ choice |
bool "Extend builtin kernel arguments with bootloader arguments" |
endchoice |
+config BOOT_RAW |
+ bool "Enable the kernel to be executed from the load address" |
+ default n |
+ help |
+ Allow the kernel to be executed from the load address for |
+ bootloaders which cannot read the ELF format. This places |
+ a jump to start_kernel at the load address. |
+ |
+ If unsure, say N. |
+ |
+ |
+ |
endmenu |
config LOCKDEP_SUPPORT |
/branches/18.06.1/target/linux/generic/pending-4.9/302-mips_no_branch_likely.patch |
---|
@@ -0,0 +1,22 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: mips: use -mno-branch-likely for kernel and userspace |
saves ~11k kernel size after lzma and ~12k squashfs size in the |
lede-commit: 41a039f46450ffae9483d6216422098669da2900 |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
arch/mips/Makefile | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/arch/mips/Makefile |
+++ b/arch/mips/Makefile |
@@ -90,7 +90,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin |
# machines may also. Since BFD is incredibly buggy with respect to |
# crossformat linking we rely on the elf2ecoff tool for format conversion. |
# |
-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe |
+cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely |
cflags-y += -msoft-float |
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib |
KBUILD_AFLAGS_MODULE += -mlong-calls |
/branches/18.06.1/target/linux/generic/pending-4.9/304-mips_disable_fpu.patch |
---|
@@ -0,0 +1,137 @@ |
From: Manuel Lauss <manuel.lauss@gmail.com> |
Subject: [RFC PATCH v4 2/2] MIPS: make FPU emulator optional |
This small patch makes the MIPS FPU emulator optional. The kernel |
kills float-users on systems without a hardware FPU by sending a SIGILL. |
Disabling the emulator shrinks vmlinux by about 54kBytes (32bit, |
optimizing for size). |
Signed-off-by: Manuel Lauss <manuel.lauss@gmail.com> |
--- |
v4: rediffed because of patch 1/2, should now work with micromips as well |
v3: updated patch description with size savings. |
v2: incorporated changes suggested by Jonas Gorski |
force the fpu emulator on for micromips: relocating the parts |
of the mmips code in the emulator to other areas would be a |
much larger change; I went the cheap route instead with this. |
arch/mips/Kbuild | 2 +- |
arch/mips/Kconfig | 14 ++++++++++++++ |
arch/mips/include/asm/fpu.h | 5 +++-- |
arch/mips/include/asm/fpu_emulator.h | 15 +++++++++++++++ |
4 files changed, 33 insertions(+), 3 deletions(-) |
--- a/arch/mips/Kconfig |
+++ b/arch/mips/Kconfig |
@@ -2891,6 +2891,20 @@ config MIPS_O32_FP64_SUPPORT |
If unsure, say N. |
+config MIPS_FPU_EMULATOR |
+ bool "MIPS FPU Emulator" |
+ default y |
+ help |
+ This option lets you disable the built-in MIPS FPU (Coprocessor 1) |
+ emulator, which handles floating-point instructions on processors |
+ without a hardware FPU. It is generally a good idea to keep the |
+ emulator built-in, unless you are perfectly sure you have a |
+ complete soft-float environment. With the emulator disabled, all |
+ users of float operations will be killed with an illegal instr- |
+ uction exception. |
+ |
+ Say Y, please. |
+ |
config USE_OF |
bool |
select OF |
--- a/arch/mips/Makefile |
+++ b/arch/mips/Makefile |
@@ -287,7 +287,7 @@ OBJCOPYFLAGS += --remove-section=.regin |
head-y := arch/mips/kernel/head.o |
libs-y += arch/mips/lib/ |
-libs-y += arch/mips/math-emu/ |
+libs-$(CONFIG_MIPS_FPU_EMULATOR) += arch/mips/math-emu/ |
# See arch/mips/Kbuild for content of core part of the kernel |
core-y += arch/mips/ |
--- a/arch/mips/include/asm/fpu.h |
+++ b/arch/mips/include/asm/fpu.h |
@@ -227,8 +227,10 @@ static inline int init_fpu(void) |
/* Restore FRE */ |
write_c0_config5(config5); |
enable_fpu_hazard(); |
- } else |
+ } else if (IS_ENABLED(CONFIG_MIPS_FPU_EMULATOR)) |
fpu_emulator_init_fpu(); |
+ else |
+ ret = SIGILL; |
return ret; |
} |
--- a/arch/mips/include/asm/fpu_emulator.h |
+++ b/arch/mips/include/asm/fpu_emulator.h |
@@ -30,6 +30,7 @@ |
#include <asm/local.h> |
#include <asm/processor.h> |
+#ifdef CONFIG_MIPS_FPU_EMULATOR |
#ifdef CONFIG_DEBUG_FS |
struct mips_fpu_emulator_stats { |
@@ -63,6 +64,16 @@ do { \ |
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, |
struct mips_fpu_struct *ctx, int has_fpu, |
void *__user *fault_addr); |
+#else /* no CONFIG_MIPS_FPU_EMULATOR */ |
+static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp, |
+ struct mips_fpu_struct *ctx, int has_fpu, |
+ void *__user *fault_addr) |
+{ |
+ *fault_addr = NULL; |
+ return SIGILL; /* we don't speak MIPS FPU */ |
+} |
+#endif /* CONFIG_MIPS_FPU_EMULATOR */ |
+ |
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, |
struct task_struct *tsk); |
int process_fpemu_return(int sig, void __user *fault_addr, |
--- a/arch/mips/include/asm/dsemul.h |
+++ b/arch/mips/include/asm/dsemul.h |
@@ -41,6 +41,7 @@ struct task_struct; |
extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, |
unsigned long branch_pc, unsigned long cont_pc); |
+#ifdef CONFIG_MIPS_FPU_EMULATOR |
/** |
* do_dsemulret() - Return from a delay slot 'emulation' frame |
* @xcp: User thread register context. |
@@ -88,5 +89,27 @@ extern bool dsemul_thread_rollback(struc |
* before @mm is freed in order to avoid memory leaks. |
*/ |
extern void dsemul_mm_cleanup(struct mm_struct *mm); |
+#else |
+static inline bool do_dsemulret(struct pt_regs *xcp) |
+{ |
+ return false; |
+} |
+ |
+static inline bool dsemul_thread_cleanup(struct task_struct *tsk) |
+{ |
+ return false; |
+} |
+ |
+static inline bool dsemul_thread_rollback(struct pt_regs *regs) |
+{ |
+ return false; |
+} |
+ |
+static inline void dsemul_mm_cleanup(struct mm_struct *mm) |
+{ |
+ |
+} |
+ |
+#endif |
#endif /* __MIPS_ASM_DSEMUL_H__ */ |
/branches/18.06.1/target/linux/generic/pending-4.9/305-mips_module_reloc.patch |
---|
@@ -0,0 +1,370 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: mips: replace -mlong-calls with -mno-long-calls to make function calls faster in kernel modules to achieve this, try to |
lede-commit: 3b3d64743ba2a874df9d70cd19e242205b0a788c |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
arch/mips/Makefile | 5 + |
arch/mips/include/asm/module.h | 5 + |
arch/mips/kernel/module.c | 279 ++++++++++++++++++++++++++++++++++++++++- |
3 files changed, 284 insertions(+), 5 deletions(-) |
--- a/arch/mips/Makefile |
+++ b/arch/mips/Makefile |
@@ -93,8 +93,18 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin |
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely |
cflags-y += -msoft-float |
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib |
+ifdef CONFIG_64BIT |
KBUILD_AFLAGS_MODULE += -mlong-calls |
KBUILD_CFLAGS_MODULE += -mlong-calls |
+else |
+ ifdef CONFIG_DYNAMIC_FTRACE |
+ KBUILD_AFLAGS_MODULE += -mlong-calls |
+ KBUILD_CFLAGS_MODULE += -mlong-calls |
+ else |
+ KBUILD_AFLAGS_MODULE += -mno-long-calls |
+ KBUILD_CFLAGS_MODULE += -mno-long-calls |
+ endif |
+endif |
ifeq ($(CONFIG_RELOCATABLE),y) |
LDFLAGS_vmlinux += --emit-relocs |
--- a/arch/mips/include/asm/module.h |
+++ b/arch/mips/include/asm/module.h |
@@ -11,6 +11,11 @@ struct mod_arch_specific { |
const struct exception_table_entry *dbe_start; |
const struct exception_table_entry *dbe_end; |
struct mips_hi16 *r_mips_hi16_list; |
+ |
+ void *phys_plt_tbl; |
+ void *virt_plt_tbl; |
+ unsigned int phys_plt_offset; |
+ unsigned int virt_plt_offset; |
}; |
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ |
--- a/arch/mips/kernel/module.c |
+++ b/arch/mips/kernel/module.c |
@@ -44,14 +44,221 @@ struct mips_hi16 { |
static LIST_HEAD(dbe_list); |
static DEFINE_SPINLOCK(dbe_lock); |
-#ifdef MODULE_START |
+/* |
+ * Get the potential max trampolines size required of the init and |
+ * non-init sections. Only used if we cannot find enough contiguous |
+ * physically mapped memory to put the module into. |
+ */ |
+static unsigned int |
+get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, |
+ const char *secstrings, unsigned int symindex, bool is_init) |
+{ |
+ unsigned long ret = 0; |
+ unsigned int i, j; |
+ Elf_Sym *syms; |
+ |
+ /* Everything marked ALLOC (this includes the exported symbols) */ |
+ for (i = 1; i < hdr->e_shnum; ++i) { |
+ unsigned int info = sechdrs[i].sh_info; |
+ |
+ if (sechdrs[i].sh_type != SHT_REL |
+ && sechdrs[i].sh_type != SHT_RELA) |
+ continue; |
+ |
+ /* Not a valid relocation section? */ |
+ if (info >= hdr->e_shnum) |
+ continue; |
+ |
+ /* Don't bother with non-allocated sections */ |
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC)) |
+ continue; |
+ |
+ /* If it's called *.init*, and we're not init, we're |
+ not interested */ |
+ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0) |
+ != is_init) |
+ continue; |
+ |
+ syms = (Elf_Sym *) sechdrs[symindex].sh_addr; |
+ if (sechdrs[i].sh_type == SHT_REL) { |
+ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr; |
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rel); |
+ |
+ for (j = 0; j < size; ++j) { |
+ Elf_Sym *sym; |
+ |
+ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26) |
+ continue; |
+ |
+ sym = syms + ELF_MIPS_R_SYM(rel[j]); |
+ if (!is_init && sym->st_shndx != SHN_UNDEF) |
+ continue; |
+ |
+ ret += 4 * sizeof(int); |
+ } |
+ } else { |
+ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr; |
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rela); |
+ |
+ for (j = 0; j < size; ++j) { |
+ Elf_Sym *sym; |
+ |
+ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26) |
+ continue; |
+ |
+ sym = syms + ELF_MIPS_R_SYM(rela[j]); |
+ if (!is_init && sym->st_shndx != SHN_UNDEF) |
+ continue; |
+ |
+ ret += 4 * sizeof(int); |
+ } |
+ } |
+ } |
+ |
+ return ret; |
+} |
+ |
+#ifndef MODULE_START |
+static void *alloc_phys(unsigned long size) |
+{ |
+ unsigned order; |
+ struct page *page; |
+ struct page *p; |
+ |
+ size = PAGE_ALIGN(size); |
+ order = get_order(size); |
+ |
+ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN | |
+ __GFP_THISNODE, order); |
+ if (!page) |
+ return NULL; |
+ |
+ split_page(page, order); |
+ |
+ /* mark all pages except for the last one */ |
+ for (p = page; p + 1 < page + (size >> PAGE_SHIFT); ++p) |
+ set_bit(PG_owner_priv_1, &p->flags); |
+ |
+ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p) |
+ __free_page(p); |
+ |
+ return page_address(page); |
+} |
+#endif |
+ |
+static void free_phys(void *ptr) |
+{ |
+ struct page *page; |
+ bool free; |
+ |
+ page = virt_to_page(ptr); |
+ do { |
+ free = test_and_clear_bit(PG_owner_priv_1, &page->flags); |
+ __free_page(page); |
+ page++; |
+ } while (free); |
+} |
+ |
+ |
void *module_alloc(unsigned long size) |
{ |
+#ifdef MODULE_START |
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, |
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, |
__builtin_return_address(0)); |
+#else |
+ void *ptr; |
+ |
+ if (size == 0) |
+ return NULL; |
+ |
+ ptr = alloc_phys(size); |
+ |
+ /* If we failed to allocate physically contiguous memory, |
+ * fall back to regular vmalloc. The module loader code will |
+ * create jump tables to handle long jumps */ |
+ if (!ptr) |
+ return vmalloc(size); |
+ |
+ return ptr; |
+#endif |
} |
+ |
+static inline bool is_phys_addr(void *ptr) |
+{ |
+#ifdef CONFIG_64BIT |
+ return (KSEGX((unsigned long)ptr) == CKSEG0); |
+#else |
+ return (KSEGX(ptr) == KSEG0); |
#endif |
+} |
+ |
+/* Free memory returned from module_alloc */ |
+void module_memfree(void *module_region) |
+{ |
+ if (is_phys_addr(module_region)) |
+ free_phys(module_region); |
+ else |
+ vfree(module_region); |
+} |
+ |
+static void *__module_alloc(int size, bool phys) |
+{ |
+ void *ptr; |
+ |
+ if (phys) |
+ ptr = kmalloc(size, GFP_KERNEL); |
+ else |
+ ptr = vmalloc(size); |
+ return ptr; |
+} |
+ |
+static void __module_free(void *ptr) |
+{ |
+ if (is_phys_addr(ptr)) |
+ kfree(ptr); |
+ else |
+ vfree(ptr); |
+} |
+ |
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, |
+ char *secstrings, struct module *mod) |
+{ |
+ unsigned int symindex = 0; |
+ unsigned int core_size, init_size; |
+ int i; |
+ |
+ mod->arch.phys_plt_offset = 0; |
+ mod->arch.virt_plt_offset = 0; |
+ mod->arch.phys_plt_tbl = NULL; |
+ mod->arch.virt_plt_tbl = NULL; |
+ |
+ if (IS_ENABLED(CONFIG_64BIT)) |
+ return 0; |
+ |
+ for (i = 1; i < hdr->e_shnum; i++) |
+ if (sechdrs[i].sh_type == SHT_SYMTAB) |
+ symindex = i; |
+ |
+ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false); |
+ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true); |
+ |
+ if ((core_size + init_size) == 0) |
+ return 0; |
+ |
+ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1); |
+ if (!mod->arch.phys_plt_tbl) |
+ return -ENOMEM; |
+ |
+ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0); |
+ if (!mod->arch.virt_plt_tbl) { |
+ __module_free(mod->arch.phys_plt_tbl); |
+ mod->arch.phys_plt_tbl = NULL; |
+ return -ENOMEM; |
+ } |
+ |
+ return 0; |
+} |
int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v) |
{ |
@@ -65,8 +272,39 @@ static int apply_r_mips_32_rel(struct mo |
return 0; |
} |
+static Elf_Addr add_plt_entry_to(unsigned *plt_offset, |
+ void *start, Elf_Addr v) |
+{ |
+ unsigned *tramp = start + *plt_offset; |
+ *plt_offset += 4 * sizeof(int); |
+ |
+ /* adjust carry for addiu */ |
+ if (v & 0x00008000) |
+ v += 0x10000; |
+ |
+ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */ |
+ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */ |
+ tramp[2] = 0x03200008; /* jr t9 */ |
+ tramp[3] = 0x00000000; /* nop */ |
+ |
+ return (Elf_Addr) tramp; |
+} |
+ |
+static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v) |
+{ |
+ if (is_phys_addr(location)) |
+ return add_plt_entry_to(&me->arch.phys_plt_offset, |
+ me->arch.phys_plt_tbl, v); |
+ else |
+ return add_plt_entry_to(&me->arch.virt_plt_offset, |
+ me->arch.virt_plt_tbl, v); |
+ |
+} |
+ |
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) |
{ |
+ u32 ofs = *location & 0x03ffffff; |
+ |
if (v % 4) { |
pr_err("module %s: dangerous R_MIPS_26 REL relocation\n", |
me->name); |
@@ -74,13 +312,17 @@ static int apply_r_mips_26_rel(struct mo |
} |
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { |
- pr_err("module %s: relocation overflow\n", |
- me->name); |
- return -ENOEXEC; |
+ v = add_plt_entry(me, location, v + (ofs << 2)); |
+ if (!v) { |
+ pr_err("module %s: relocation overflow\n", |
+ me->name); |
+ return -ENOEXEC; |
+ } |
+ ofs = 0; |
} |
*location = (*location & ~0x03ffffff) | |
- ((*location + (v >> 2)) & 0x03ffffff); |
+ ((ofs + (v >> 2)) & 0x03ffffff); |
return 0; |
} |
@@ -349,9 +591,36 @@ int module_finalize(const Elf_Ehdr *hdr, |
list_add(&me->arch.dbe_list, &dbe_list); |
spin_unlock_irq(&dbe_lock); |
} |
+ |
+ /* Get rid of the fixup trampoline if we're running the module |
+ * from physically mapped address space */ |
+ if (me->arch.phys_plt_offset == 0) { |
+ __module_free(me->arch.phys_plt_tbl); |
+ me->arch.phys_plt_tbl = NULL; |
+ } |
+ if (me->arch.virt_plt_offset == 0) { |
+ __module_free(me->arch.virt_plt_tbl); |
+ me->arch.virt_plt_tbl = NULL; |
+ } |
+ |
return 0; |
} |
+void module_arch_freeing_init(struct module *mod) |
+{ |
+ if (mod->state == MODULE_STATE_LIVE) |
+ return; |
+ |
+ if (mod->arch.phys_plt_tbl) { |
+ __module_free(mod->arch.phys_plt_tbl); |
+ mod->arch.phys_plt_tbl = NULL; |
+ } |
+ if (mod->arch.virt_plt_tbl) { |
+ __module_free(mod->arch.virt_plt_tbl); |
+ mod->arch.virt_plt_tbl = NULL; |
+ } |
+} |
+ |
void module_arch_cleanup(struct module *mod) |
{ |
spin_lock_irq(&dbe_lock); |
/branches/18.06.1/target/linux/generic/pending-4.9/306-mips_mem_functions_performance.patch |
---|
@@ -0,0 +1,106 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: [PATCH] mips: allow the compiler to optimize memset, memcmp, memcpy for better performance and (in some instances) smaller code |
lede-commit: 07e59c7bc7f375f792ec9734be42fe4fa391a8bb |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
arch/mips/boot/compressed/Makefile | 3 ++- |
arch/mips/include/asm/string.h | 38 ++++++++++++++++++++++++++++++++++++++ |
arch/mips/lib/Makefile | 2 +- |
arch/mips/lib/memcmp.c | 22 ++++++++++++++++++++++ |
4 files changed, 63 insertions(+), 2 deletions(-) |
create mode 100644 arch/mips/lib/memcmp.c |
--- a/arch/mips/boot/compressed/Makefile |
+++ b/arch/mips/boot/compressed/Makefile |
@@ -23,7 +23,8 @@ KBUILD_CFLAGS := $(shell echo $(KBUILD_C |
KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) |
KBUILD_CFLAGS := $(LINUXINCLUDE) $(KBUILD_CFLAGS) -D__KERNEL__ \ |
- -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" |
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" \ |
+ -D__ZBOOT__ |
KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ |
-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \ |
--- a/arch/mips/include/asm/string.h |
+++ b/arch/mips/include/asm/string.h |
@@ -140,4 +140,42 @@ extern void *memcpy(void *__to, __const_ |
#define __HAVE_ARCH_MEMMOVE |
extern void *memmove(void *__dest, __const__ void *__src, size_t __n); |
+#ifndef __ZBOOT__ |
+#define memset(__s, __c, len) \ |
+({ \ |
+ size_t __len = (len); \ |
+ void *__ret; \ |
+ if (__builtin_constant_p(len) && __len >= 64) \ |
+ __ret = memset((__s), (__c), __len); \ |
+ else \ |
+ __ret = __builtin_memset((__s), (__c), __len); \ |
+ __ret; \ |
+}) |
+ |
+#define memcpy(dst, src, len) \ |
+({ \ |
+ size_t __len = (len); \ |
+ void *__ret; \ |
+ if (__builtin_constant_p(len) && __len >= 64) \ |
+ __ret = memcpy((dst), (src), __len); \ |
+ else \ |
+ __ret = __builtin_memcpy((dst), (src), __len); \ |
+ __ret; \ |
+}) |
+ |
+#define memmove(dst, src, len) \ |
+({ \ |
+ size_t __len = (len); \ |
+ void *__ret; \ |
+ if (__builtin_constant_p(len) && __len >= 64) \ |
+ __ret = memmove((dst), (src), __len); \ |
+ else \ |
+ __ret = __builtin_memmove((dst), (src), __len); \ |
+ __ret; \ |
+}) |
+ |
+#define __HAVE_ARCH_MEMCMP |
+#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len)) |
+#endif |
+ |
#endif /* _ASM_STRING_H */ |
--- a/arch/mips/lib/Makefile |
+++ b/arch/mips/lib/Makefile |
@@ -4,7 +4,7 @@ |
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ |
mips-atomic.o strlen_user.o strncpy_user.o \ |
- strnlen_user.o uncached.o |
+ strnlen_user.o uncached.o memcmp.o |
obj-y += iomap.o |
obj-$(CONFIG_PCI) += iomap-pci.o |
--- /dev/null |
+++ b/arch/mips/lib/memcmp.c |
@@ -0,0 +1,22 @@ |
+/* |
+ * copied from linux/lib/string.c |
+ * |
+ * Copyright (C) 1991, 1992 Linus Torvalds |
+ */ |
+ |
+#include <linux/module.h> |
+#include <linux/string.h> |
+ |
+#undef memcmp |
+int memcmp(const void *cs, const void *ct, size_t count) |
+{ |
+ const unsigned char *su1, *su2; |
+ int res = 0; |
+ |
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) |
+ if ((res = *su1 - *su2) != 0) |
+ break; |
+ return res; |
+} |
+EXPORT_SYMBOL(memcmp); |
+ |
/branches/18.06.1/target/linux/generic/pending-4.9/307-mips_highmem_offset.patch |
---|
@@ -0,0 +1,19 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: adjust mips highmem offset to avoid the need for -mlong-calls on systems with >256M RAM |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
arch/mips/include/asm/mach-generic/spaces.h | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/arch/mips/include/asm/mach-generic/spaces.h |
+++ b/arch/mips/include/asm/mach-generic/spaces.h |
@@ -46,7 +46,7 @@ |
* Memory above this physical address will be considered highmem. |
*/ |
#ifndef HIGHMEM_START |
-#define HIGHMEM_START _AC(0x20000000, UL) |
+#define HIGHMEM_START _AC(0x10000000, UL) |
#endif |
#endif /* CONFIG_32BIT */ |
/branches/18.06.1/target/linux/generic/pending-4.9/308-mips32r2_tune.patch |
---|
@@ -0,0 +1,22 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: add -mtune=34kc to MIPS CFLAGS when building for mips32r2 |
This provides a good tradeoff across at least 24Kc-74Kc, while also |
producing smaller code. |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
arch/mips/Makefile | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/arch/mips/Makefile |
+++ b/arch/mips/Makefile |
@@ -153,7 +153,7 @@ cflags-$(CONFIG_CPU_R4X00) += -march=r46 |
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap |
cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ |
-Wa,-mips32 -Wa,--trap |
-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ |
+cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2 -mtune=34kc,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ |
-Wa,-mips32r2 -Wa,--trap |
cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap |
cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ |
/branches/18.06.1/target/linux/generic/pending-4.9/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch |
---|
@@ -0,0 +1,134 @@ |
From 87ec87c2ad615c1a177cd08ef5fa29fc739f6e50 Mon Sep 17 00:00:00 2001 |
From: Hauke Mehrtens <hauke@hauke-m.de> |
Date: Sun, 23 Dec 2018 18:06:53 +0100 |
Subject: [PATCH] MIPS: Add CPU option reporting to /proc/cpuinfo |
Many MIPS CPUs have optional CPU features which are not activates for |
all CPU cores. Print the CPU options which are implemented in the core |
in /proc/cpuinfo. This makes it possible to see what features are |
supported and which are not supported. This should cover all standard |
MIPS extensions, before it only printed information about the main MIPS |
ASEs. |
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> |
--- |
arch/mips/kernel/proc.c | 116 ++++++++++++++++++++++++++++++++++++++++ |
1 file changed, 116 insertions(+) |
--- a/arch/mips/kernel/proc.c |
+++ b/arch/mips/kernel/proc.c |
@@ -128,6 +128,114 @@ static int show_cpuinfo(struct seq_file |
seq_printf(m, "micromips kernel\t: %s\n", |
(read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no"); |
} |
+ |
+ seq_printf(m, "Options implemented\t:"); |
+ if (cpu_has_tlb) |
+ seq_printf(m, "%s", " tlb"); |
+ if (cpu_has_ftlb) |
+ seq_printf(m, "%s", " ftlb"); |
+ if (cpu_has_tlbinv) |
+ seq_printf(m, "%s", " tlbinv"); |
+ if (cpu_has_segments) |
+ seq_printf(m, "%s", " segments"); |
+ if (cpu_has_rixiex) |
+ seq_printf(m, "%s", " rixiex"); |
+ if (cpu_has_ldpte) |
+ seq_printf(m, "%s", " ldpte"); |
+ if (cpu_has_maar) |
+ seq_printf(m, "%s", " maar"); |
+ if (cpu_has_rw_llb) |
+ seq_printf(m, "%s", " rw_llb"); |
+ if (cpu_has_4kex) |
+ seq_printf(m, "%s", " 4kex"); |
+ if (cpu_has_3k_cache) |
+ seq_printf(m, "%s", " 3k_cache"); |
+ if (cpu_has_4k_cache) |
+ seq_printf(m, "%s", " 4k_cache"); |
+ if (cpu_has_6k_cache) |
+ seq_printf(m, "%s", " 6k_cache"); |
+ if (cpu_has_8k_cache) |
+ seq_printf(m, "%s", " 8k_cache"); |
+ if (cpu_has_tx39_cache) |
+ seq_printf(m, "%s", " tx39_cache"); |
+ if (cpu_has_octeon_cache) |
+ seq_printf(m, "%s", " octeon_cache"); |
+ if (cpu_has_fpu) |
+ seq_printf(m, "%s", " fpu"); |
+ if (cpu_has_32fpr) |
+ seq_printf(m, "%s", " 32fpr"); |
+ if (cpu_has_cache_cdex_p) |
+ seq_printf(m, "%s", " cache_cdex_p"); |
+ if (cpu_has_cache_cdex_s) |
+ seq_printf(m, "%s", " cache_cdex_s"); |
+ if (cpu_has_prefetch) |
+ seq_printf(m, "%s", " prefetch"); |
+ if (cpu_has_mcheck) |
+ seq_printf(m, "%s", " mcheck"); |
+ if (cpu_has_ejtag) |
+ seq_printf(m, "%s", " ejtag"); |
+ if (cpu_has_llsc) |
+ seq_printf(m, "%s", " llsc"); |
+ if (cpu_has_bp_ghist) |
+ seq_printf(m, "%s", " bp_ghist"); |
+ if (cpu_has_guestctl0ext) |
+ seq_printf(m, "%s", " guestctl0ext"); |
+ if (cpu_has_guestctl1) |
+ seq_printf(m, "%s", " guestctl1"); |
+ if (cpu_has_guestctl2) |
+ seq_printf(m, "%s", " guestctl2"); |
+ if (cpu_has_guestid) |
+ seq_printf(m, "%s", " guestid"); |
+ if (cpu_has_drg) |
+ seq_printf(m, "%s", " drg"); |
+ if (cpu_has_rixi) |
+ seq_printf(m, "%s", " rixi"); |
+ if (cpu_has_lpa) |
+ seq_printf(m, "%s", " lpa"); |
+ if (cpu_has_mvh) |
+ seq_printf(m, "%s", " mvh"); |
+ if (cpu_has_vtag_icache) |
+ seq_printf(m, "%s", " vtag_icache"); |
+ if (cpu_has_dc_aliases) |
+ seq_printf(m, "%s", " dc_aliases"); |
+ if (cpu_has_ic_fills_f_dc) |
+ seq_printf(m, "%s", " ic_fills_f_dc"); |
+ if (cpu_has_pindexed_dcache) |
+ seq_printf(m, "%s", " pindexed_dcache"); |
+ if (cpu_has_userlocal) |
+ seq_printf(m, "%s", " userlocal"); |
+ if (cpu_has_nofpuex) |
+ seq_printf(m, "%s", " nofpuex"); |
+ if (cpu_has_vint) |
+ seq_printf(m, "%s", " vint"); |
+ if (cpu_has_veic) |
+ seq_printf(m, "%s", " veic"); |
+ if (cpu_has_inclusive_pcaches) |
+ seq_printf(m, "%s", " inclusive_pcaches"); |
+ if (cpu_has_perf_cntr_intr_bit) |
+ seq_printf(m, "%s", " perf_cntr_intr_bit"); |
+ if (cpu_has_fre) |
+ seq_printf(m, "%s", " fre"); |
+ if (cpu_has_cdmm) |
+ seq_printf(m, "%s", " cdmm"); |
+ if (cpu_has_small_pages) |
+ seq_printf(m, "%s", " small_pages"); |
+ if (cpu_has_nan_legacy) |
+ seq_printf(m, "%s", " nan_legacy"); |
+ if (cpu_has_nan_2008) |
+ seq_printf(m, "%s", " nan_2008"); |
+ if (cpu_has_ebase_wg) |
+ seq_printf(m, "%s", " ebase_wg"); |
+ if (cpu_has_badinstr) |
+ seq_printf(m, "%s", " badinstr"); |
+ if (cpu_has_badinstrp) |
+ seq_printf(m, "%s", " badinstrp"); |
+ if (cpu_has_contextconfig) |
+ seq_printf(m, "%s", " contextconfig"); |
+ if (cpu_has_perf) |
+ seq_printf(m, "%s", " perf"); |
+ seq_printf(m, "\n"); |
+ |
seq_printf(m, "shadow register sets\t: %d\n", |
cpu_data[n].srsets); |
seq_printf(m, "kscratch registers\t: %d\n", |
/branches/18.06.1/target/linux/generic/pending-4.9/310-arm_module_unresolved_weak_sym.patch |
---|
@@ -0,0 +1,22 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: fix errors in unresolved weak symbols on arm |
lede-commit: 570699d4838a907c3ef9f2819bf19eb72997b32f |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
arch/arm/kernel/module.c | 4 ++++ |
1 file changed, 4 insertions(+) |
--- a/arch/arm/kernel/module.c |
+++ b/arch/arm/kernel/module.c |
@@ -88,6 +88,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons |
return -ENOEXEC; |
} |
+ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) && |
+ ELF_ST_BIND(sym->st_info) == STB_WEAK) |
+ continue; |
+ |
loc = dstsec->sh_addr + rel->r_offset; |
switch (ELF32_R_TYPE(rel->r_info)) { |
/branches/18.06.1/target/linux/generic/pending-4.9/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch |
---|
@@ -0,0 +1,269 @@ |
From: Yousong Zhou <yszhou4tech@gmail.com> |
Subject: MIPS: kexec: Accept command line parameters from userspace. |
Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com> |
--- |
arch/mips/kernel/machine_kexec.c | 153 +++++++++++++++++++++++++++++++----- |
arch/mips/kernel/machine_kexec.h | 20 +++++ |
arch/mips/kernel/relocate_kernel.S | 21 +++-- |
3 files changed, 167 insertions(+), 27 deletions(-) |
create mode 100644 arch/mips/kernel/machine_kexec.h |
--- a/arch/mips/kernel/machine_kexec.c |
+++ b/arch/mips/kernel/machine_kexec.c |
@@ -10,14 +10,11 @@ |
#include <linux/mm.h> |
#include <linux/delay.h> |
+#include <asm/bootinfo.h> |
#include <asm/cacheflush.h> |
#include <asm/page.h> |
- |
-extern const unsigned char relocate_new_kernel[]; |
-extern const size_t relocate_new_kernel_size; |
- |
-extern unsigned long kexec_start_address; |
-extern unsigned long kexec_indirection_page; |
+#include <asm/uaccess.h> |
+#include "machine_kexec.h" |
int (*_machine_kexec_prepare)(struct kimage *) = NULL; |
void (*_machine_kexec_shutdown)(void) = NULL; |
@@ -28,9 +25,115 @@ atomic_t kexec_ready_to_reboot = ATOMIC_ |
void (*_crash_smp_send_stop)(void) = NULL; |
#endif |
+static void machine_kexec_print_args(void) |
+{ |
+ unsigned long argc = (int)kexec_args[0]; |
+ int i; |
+ |
+ pr_info("kexec_args[0] (argc): %lu\n", argc); |
+ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]); |
+ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]); |
+ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]); |
+ |
+ for (i = 0; i < argc; i++) { |
+ pr_info("kexec_argv[%d] = %p, %s\n", |
+ i, kexec_argv[i], kexec_argv[i]); |
+ } |
+} |
+ |
+static void machine_kexec_init_argv(struct kimage *image) |
+{ |
+ void __user *buf = NULL; |
+ size_t bufsz; |
+ size_t size; |
+ int i; |
+ |
+ bufsz = 0; |
+ for (i = 0; i < image->nr_segments; i++) { |
+ struct kexec_segment *seg; |
+ |
+ seg = &image->segment[i]; |
+ if (seg->bufsz < 6) |
+ continue; |
+ |
+ if (strncmp((char *) seg->buf, "kexec ", 6)) |
+ continue; |
+ |
+ buf = seg->buf; |
+ bufsz = seg->bufsz; |
+ break; |
+ } |
+ |
+ if (!buf) |
+ return; |
+ |
+ size = KEXEC_COMMAND_LINE_SIZE; |
+ size = min(size, bufsz); |
+ if (size < bufsz) |
+ pr_warn("kexec command line truncated to %zd bytes\n", size); |
+ |
+ /* Copy to kernel space */ |
+ copy_from_user(kexec_argv_buf, buf, size); |
+ kexec_argv_buf[size - 1] = 0; |
+} |
+ |
+static void machine_kexec_parse_argv(struct kimage *image) |
+{ |
+ char *reboot_code_buffer; |
+ int reloc_delta; |
+ char *ptr; |
+ int argc; |
+ int i; |
+ |
+ ptr = kexec_argv_buf; |
+ argc = 0; |
+ |
+ /* |
+ * convert command line string to array of parameters |
+ * (as bootloader does). |
+ */ |
+ while (ptr && *ptr && (KEXEC_MAX_ARGC > argc)) { |
+ if (*ptr == ' ') { |
+ *ptr++ = '\0'; |
+ continue; |
+ } |
+ |
+ kexec_argv[argc++] = ptr; |
+ ptr = strchr(ptr, ' '); |
+ } |
+ |
+ if (!argc) |
+ return; |
+ |
+ kexec_args[0] = argc; |
+ kexec_args[1] = (unsigned long)kexec_argv; |
+ kexec_args[2] = 0; |
+ kexec_args[3] = 0; |
+ |
+ reboot_code_buffer = page_address(image->control_code_page); |
+ reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel; |
+ |
+ kexec_args[1] += reloc_delta; |
+ for (i = 0; i < argc; i++) |
+ kexec_argv[i] += reloc_delta; |
+} |
+ |
int |
machine_kexec_prepare(struct kimage *kimage) |
{ |
+ /* |
+	 * Whenever arguments are passed from kexec-tools, init the arguments |
+	 * as the original ones to try to avoid boot failure. |
+ */ |
+ |
+ kexec_args[0] = fw_arg0; |
+ kexec_args[1] = fw_arg1; |
+ kexec_args[2] = fw_arg2; |
+ kexec_args[3] = fw_arg3; |
+ |
+ machine_kexec_init_argv(kimage); |
+ machine_kexec_parse_argv(kimage); |
+ |
if (_machine_kexec_prepare) |
return _machine_kexec_prepare(kimage); |
return 0; |
@@ -67,10 +170,12 @@ machine_kexec(struct kimage *image) |
unsigned long *ptr; |
reboot_code_buffer = |
- (unsigned long)page_address(image->control_code_page); |
+ (unsigned long)page_address(image->control_code_page); |
+ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer); |
kexec_start_address = |
(unsigned long) phys_to_virt(image->start); |
+ pr_info("kexec_start_address = %p\n", (void *)kexec_start_address); |
if (image->type == KEXEC_TYPE_DEFAULT) { |
kexec_indirection_page = |
@@ -78,9 +183,19 @@ machine_kexec(struct kimage *image) |
} else { |
kexec_indirection_page = (unsigned long)&image->head; |
} |
+ pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page); |
- memcpy((void*)reboot_code_buffer, relocate_new_kernel, |
- relocate_new_kernel_size); |
+ pr_info("Where is memcpy: %p\n", memcpy); |
+ pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n", |
+ (void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end); |
+ pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE, |
+ (void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer); |
+ memcpy((void*)reboot_code_buffer, kexec_relocate_new_kernel, |
+ KEXEC_RELOCATE_NEW_KERNEL_SIZE); |
+ |
+ pr_info("Before _print_args().\n"); |
+ machine_kexec_print_args(); |
+ pr_info("Before eval loop.\n"); |
/* |
* The generic kexec code builds a page list with physical |
@@ -102,15 +217,16 @@ machine_kexec(struct kimage *image) |
/* |
* we do not want to be bothered. |
*/ |
+ pr_info("Before irq_disable.\n"); |
local_irq_disable(); |
- printk("Will call new kernel at %08lx\n", image->start); |
- printk("Bye ...\n"); |
+ pr_info("Will call new kernel at %08lx\n", image->start); |
+ pr_info("Bye ...\n"); |
__flush_cache_all(); |
#ifdef CONFIG_SMP |
/* All secondary cpus now may jump to kexec_wait cycle */ |
relocated_kexec_smp_wait = reboot_code_buffer + |
- (void *)(kexec_smp_wait - relocate_new_kernel); |
+ (void *)(kexec_smp_wait - kexec_relocate_new_kernel); |
smp_wmb(); |
atomic_set(&kexec_ready_to_reboot, 1); |
#endif |
--- /dev/null |
+++ b/arch/mips/kernel/machine_kexec.h |
@@ -0,0 +1,20 @@ |
+#ifndef _MACHINE_KEXEC_H |
+#define _MACHINE_KEXEC_H |
+ |
+#ifndef __ASSEMBLY__ |
+extern const unsigned char kexec_relocate_new_kernel[]; |
+extern unsigned long kexec_relocate_new_kernel_end; |
+extern unsigned long kexec_start_address; |
+extern unsigned long kexec_indirection_page; |
+ |
+extern char kexec_argv_buf[]; |
+extern char *kexec_argv[]; |
+ |
+#define KEXEC_RELOCATE_NEW_KERNEL_SIZE ((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel) |
+#endif /* !__ASSEMBLY__ */ |
+ |
+#define KEXEC_COMMAND_LINE_SIZE 256 |
+#define KEXEC_ARGV_SIZE (KEXEC_COMMAND_LINE_SIZE / 16) |
+#define KEXEC_MAX_ARGC (KEXEC_ARGV_SIZE / sizeof(long)) |
+ |
+#endif |
--- a/arch/mips/kernel/relocate_kernel.S |
+++ b/arch/mips/kernel/relocate_kernel.S |
@@ -12,8 +12,9 @@ |
#include <asm/mipsregs.h> |
#include <asm/stackframe.h> |
#include <asm/addrspace.h> |
+#include "machine_kexec.h" |
-LEAF(relocate_new_kernel) |
+LEAF(kexec_relocate_new_kernel) |
PTR_L a0, arg0 |
PTR_L a1, arg1 |
PTR_L a2, arg2 |
@@ -98,7 +99,7 @@ done: |
#endif |
/* jump to kexec_start_address */ |
j s1 |
- END(relocate_new_kernel) |
+ END(kexec_relocate_new_kernel) |
#ifdef CONFIG_SMP |
/* |
@@ -184,9 +185,15 @@ kexec_indirection_page: |
PTR 0 |
.size kexec_indirection_page, PTRSIZE |
-relocate_new_kernel_end: |
+kexec_argv_buf: |
+ EXPORT(kexec_argv_buf) |
+ .skip KEXEC_COMMAND_LINE_SIZE |
+ .size kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE |
+ |
+kexec_argv: |
+ EXPORT(kexec_argv) |
+ .skip KEXEC_ARGV_SIZE |
+ .size kexec_argv, KEXEC_ARGV_SIZE |
-relocate_new_kernel_size: |
- EXPORT(relocate_new_kernel_size) |
- PTR relocate_new_kernel_end - relocate_new_kernel |
- .size relocate_new_kernel_size, PTRSIZE |
+kexec_relocate_new_kernel_end: |
+ EXPORT(kexec_relocate_new_kernel_end) |
/branches/18.06.1/target/linux/generic/pending-4.9/332-arc-add-OWRTDTB-section.patch |
---|
@@ -0,0 +1,80 @@ |
From: Alexey Brodkin <abrodkin@synopsys.com> |
Subject: openwrt: arc - add OWRTDTB section |
This change allows OpenWRT to patch resulting kernel binary with |
external .dtb. |
That allows us to re-use exactly the same vmlinux on different boards |
given its ARC core configurations match (at least cache line sizes etc). |
"patch-dtb" searches for ASCII "OWRTDTB:" string and copies external |
.dtb right after it, keeping the string in place. |
Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com> |
--- |
arch/arc/kernel/head.S | 10 ++++++++++ |
arch/arc/kernel/setup.c | 4 +++- |
arch/arc/kernel/vmlinux.lds.S | 13 +++++++++++++ |
3 files changed, 26 insertions(+), 1 deletion(-) |
--- a/arch/arc/kernel/head.S |
+++ b/arch/arc/kernel/head.S |
@@ -49,6 +49,16 @@ |
1: |
.endm |
+; Here "patch-dtb" will embed external .dtb |
+; Note "patch-dtb" searches for ASCII "OWRTDTB:" string |
+; and pastes .dtb right after it, hence the string precedes |
+; __image_dtb symbol. |
+ .section .owrt, "aw",@progbits |
+ .ascii "OWRTDTB:" |
+ENTRY(__image_dtb) |
+ .fill 0x4000 |
+END(__image_dtb) |
+ |
.section .init.text, "ax",@progbits |
;---------------------------------------------------------------- |
--- a/arch/arc/kernel/setup.c |
+++ b/arch/arc/kernel/setup.c |
@@ -388,6 +388,8 @@ static inline int is_kernel(unsigned lon |
return 0; |
} |
+extern struct boot_param_header __image_dtb; |
+ |
void __init setup_arch(char **cmdline_p) |
{ |
#ifdef CONFIG_ARC_UBOOT_SUPPORT |
@@ -401,7 +403,7 @@ void __init setup_arch(char **cmdline_p) |
#endif |
{ |
/* No, so try the embedded one */ |
- machine_desc = setup_machine_fdt(__dtb_start); |
+ machine_desc = setup_machine_fdt(&__image_dtb); |
if (!machine_desc) |
panic("Embedded DT invalid\n"); |
--- a/arch/arc/kernel/vmlinux.lds.S |
+++ b/arch/arc/kernel/vmlinux.lds.S |
@@ -30,6 +30,19 @@ SECTIONS |
. = CONFIG_LINUX_LINK_BASE; |
+ /* |
+ * In OpenWRT we want to patch built binary embedding .dtb of choice. |
+ * This is implemented with "patch-dtb" utility which searches for |
+ * "OWRTDTB:" string in first 16k of image and if it is found |
+ * copies .dtb right after mentioned string. |
+ * |
+ * Note: "OWRTDTB:" won't be overwritten with .dtb, .dtb will follow it. |
+ */ |
+ .owrt : { |
+ *(.owrt) |
+ . = ALIGN(PAGE_SIZE); |
+ } |
+ |
_int_vec_base_lds = .; |
.vector : { |
*(.vector) |
/branches/18.06.1/target/linux/generic/pending-4.9/333-arc-enable-unaligned-access-in-kernel-mode.patch |
---|
@@ -0,0 +1,24 @@ |
From: Alexey Brodkin <abrodkin@synopsys.com> |
Subject: arc: enable unaligned access in kernel mode |
This enables misaligned access handling even in kernel mode. |
Some wireless drivers (ath9k-htc and mt7601u) use misaligned accesses |
here and there and to cope with that without fixing stuff in the drivers |
we're just gracefully handling it on ARC. |
Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com> |
--- |
arch/arc/kernel/unaligned.c | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/arch/arc/kernel/unaligned.c |
+++ b/arch/arc/kernel/unaligned.c |
@@ -206,7 +206,7 @@ int misaligned_fixup(unsigned long addre |
char buf[TASK_COMM_LEN]; |
/* handle user mode only and only if enabled by sysadmin */ |
- if (!user_mode(regs) || !unaligned_enabled) |
+ if (!unaligned_enabled) |
return 1; |
if (no_unaligned_warning) { |
/branches/18.06.1/target/linux/generic/pending-4.9/340-MIPS-mm-remove-mips_dma_mapping_error.patch |
---|
@@ -0,0 +1,32 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Date: Tue, 5 Dec 2017 12:34:31 +0100 |
Subject: [PATCH] MIPS: mm: remove mips_dma_mapping_error |
dma_mapping_error() already checks if ops->mapping_error is a null |
pointer |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/arch/mips/mm/dma-default.c |
+++ b/arch/mips/mm/dma-default.c |
@@ -394,11 +394,6 @@ static void mips_dma_sync_sg_for_device( |
} |
} |
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
-{ |
- return 0; |
-} |
- |
int mips_dma_supported(struct device *dev, u64 mask) |
{ |
return plat_dma_supported(dev, mask); |
@@ -427,7 +422,6 @@ static struct dma_map_ops mips_default_d |
.sync_single_for_device = mips_dma_sync_single_for_device, |
.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu, |
.sync_sg_for_device = mips_dma_sync_sg_for_device, |
- .mapping_error = mips_dma_mapping_error, |
.dma_supported = mips_dma_supported |
}; |
/branches/18.06.1/target/linux/generic/pending-4.9/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch |
---|
@@ -0,0 +1,140 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Date: Tue, 5 Dec 2017 12:46:01 +0100 |
Subject: [PATCH] MIPS: mm: remove no-op dma_map_ops where possible |
If no post-DMA flush is required, and the platform does not provide |
plat_unmap_dma_mem(), there is no need to include unmap or sync_for_cpu |
ops. |
With this patch they are compiled out to improve icache footprint |
on devices that handle lots of DMA traffic (especially network routers). |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/arch/mips/Kconfig |
+++ b/arch/mips/Kconfig |
@@ -214,6 +214,7 @@ config BMIPS_GENERIC |
select BRCMSTB_L2_IRQ |
select IRQ_MIPS_CPU |
select DMA_NONCOHERENT |
+ select DMA_UNMAP_POST_FLUSH |
select SYS_SUPPORTS_32BIT_KERNEL |
select SYS_SUPPORTS_LITTLE_ENDIAN |
select SYS_SUPPORTS_BIG_ENDIAN |
@@ -339,6 +340,7 @@ config MACH_JAZZ |
select CSRC_R4K |
select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN |
select GENERIC_ISA_DMA |
+ select DMA_UNMAP_POST_FLUSH |
select HAVE_PCSPKR_PLATFORM |
select IRQ_MIPS_CPU |
select I8253 |
@@ -1129,6 +1131,9 @@ config DMA_NONCOHERENT |
bool |
select NEED_DMA_MAP_STATE |
+config DMA_UNMAP_POST_FLUSH |
+ bool |
+ |
config NEED_DMA_MAP_STATE |
bool |
@@ -1653,6 +1658,7 @@ config CPU_R10000 |
select CPU_SUPPORTS_64BIT_KERNEL |
select CPU_SUPPORTS_HIGHMEM |
select CPU_SUPPORTS_HUGEPAGES |
+ select DMA_UNMAP_POST_FLUSH |
help |
MIPS Technologies R10000-series processors. |
@@ -1898,9 +1904,11 @@ config SYS_HAS_CPU_MIPS32_R3_5 |
bool |
config SYS_HAS_CPU_MIPS32_R5 |
+ select DMA_UNMAP_POST_FLUSH |
bool |
config SYS_HAS_CPU_MIPS32_R6 |
+ select DMA_UNMAP_POST_FLUSH |
bool |
config SYS_HAS_CPU_MIPS64_R1 |
@@ -1910,6 +1918,7 @@ config SYS_HAS_CPU_MIPS64_R2 |
bool |
config SYS_HAS_CPU_MIPS64_R6 |
+ select DMA_UNMAP_POST_FLUSH |
bool |
config SYS_HAS_CPU_R3000 |
--- a/arch/mips/mm/dma-default.c |
+++ b/arch/mips/mm/dma-default.c |
@@ -290,8 +290,9 @@ static inline void __dma_sync(struct pag |
} while (left); |
} |
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
- size_t size, enum dma_data_direction direction, unsigned long attrs) |
+static void __maybe_unused |
+mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
+ enum dma_data_direction direction, unsigned long attrs) |
{ |
if (cpu_needs_post_dma_flush(dev)) |
__dma_sync(dma_addr_to_page(dev, dma_addr), |
@@ -330,9 +331,10 @@ static dma_addr_t mips_dma_map_page(stru |
return plat_map_dma_mem_page(dev, page) + offset; |
} |
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
- int nhwentries, enum dma_data_direction direction, |
- unsigned long attrs) |
+static void __maybe_unused |
+mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
+ int nhwentries, enum dma_data_direction direction, |
+ unsigned long attrs) |
{ |
int i; |
struct scatterlist *sg; |
@@ -346,8 +348,9 @@ static void mips_dma_unmap_sg(struct dev |
} |
} |
-static void mips_dma_sync_single_for_cpu(struct device *dev, |
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) |
+static void __maybe_unused |
+mips_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
+ size_t size, enum dma_data_direction direction) |
{ |
if (cpu_needs_post_dma_flush(dev)) |
__dma_sync(dma_addr_to_page(dev, dma_handle), |
@@ -363,9 +366,9 @@ static void mips_dma_sync_single_for_dev |
dma_handle & ~PAGE_MASK, size, direction); |
} |
-static void mips_dma_sync_sg_for_cpu(struct device *dev, |
- struct scatterlist *sglist, int nelems, |
- enum dma_data_direction direction) |
+static void __maybe_unused |
+mips_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, |
+ int nelems, enum dma_data_direction direction) |
{ |
int i; |
struct scatterlist *sg; |
@@ -415,12 +418,14 @@ static struct dma_map_ops mips_default_d |
.free = mips_dma_free_coherent, |
.mmap = mips_dma_mmap, |
.map_page = mips_dma_map_page, |
- .unmap_page = mips_dma_unmap_page, |
.map_sg = mips_dma_map_sg, |
+#ifdef CONFIG_DMA_UNMAP_POST_FLUSH |
+ .unmap_page = mips_dma_unmap_page, |
.unmap_sg = mips_dma_unmap_sg, |
.sync_single_for_cpu = mips_dma_sync_single_for_cpu, |
- .sync_single_for_device = mips_dma_sync_single_for_device, |
.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu, |
+#endif |
+ .sync_single_for_device = mips_dma_sync_single_for_device, |
.sync_sg_for_device = mips_dma_sync_sg_for_device, |
.dma_supported = mips_dma_supported |
}; |
/branches/18.06.1/target/linux/generic/pending-4.9/400-mtd-add-rootfs-split-support.patch |
---|
@@ -0,0 +1,108 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: make rootfs split/detection more generic - patch can be moved to generic-2.6 after testing on other platforms |
lede-commit: 328e660b31f0937d52c5ae3d6e7029409918a9df |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/mtd/Kconfig | 17 +++++++++++++++++ |
drivers/mtd/mtdpart.c | 35 +++++++++++++++++++++++++++++++++++ |
include/linux/mtd/partitions.h | 2 ++ |
3 files changed, 54 insertions(+) |
--- a/drivers/mtd/Kconfig |
+++ b/drivers/mtd/Kconfig |
@@ -12,6 +12,23 @@ menuconfig MTD |
if MTD |
+menu "OpenWrt specific MTD options" |
+ |
+config MTD_ROOTFS_ROOT_DEV |
+ bool "Automatically set 'rootfs' partition to be root filesystem" |
+ default y |
+ |
+config MTD_SPLIT_FIRMWARE |
+ bool "Automatically split firmware partition for kernel+rootfs" |
+ default y |
+ |
+config MTD_SPLIT_FIRMWARE_NAME |
+ string "Firmware partition name" |
+ depends on MTD_SPLIT_FIRMWARE |
+ default "firmware" |
+ |
+endmenu |
+ |
config MTD_TESTS |
tristate "MTD tests support (DANGEROUS)" |
depends on m |
--- a/drivers/mtd/mtdpart.c |
+++ b/drivers/mtd/mtdpart.c |
@@ -29,11 +29,13 @@ |
#include <linux/kmod.h> |
#include <linux/mtd/mtd.h> |
#include <linux/mtd/partitions.h> |
+#include <linux/magic.h> |
#include <linux/of.h> |
#include <linux/err.h> |
#include <linux/of.h> |
#include "mtdcore.h" |
+#include "mtdsplit/mtdsplit.h" |
/* Our partition linked list */ |
static LIST_HEAD(mtd_partitions); |
@@ -53,6 +55,8 @@ struct mtd_part { |
struct list_head list; |
}; |
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part); |
+ |
/* |
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve |
* the pointer to that structure. |
@@ -650,6 +654,7 @@ int mtd_add_partition(struct mtd_info *p |
mutex_unlock(&mtd_partitions_mutex); |
add_mtd_device(&new->mtd); |
+ mtd_partition_split(parent, new); |
mtd_add_partition_attrs(new); |
@@ -728,6 +733,29 @@ int mtd_del_partition(struct mtd_info *m |
} |
EXPORT_SYMBOL_GPL(mtd_del_partition); |
+#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME |
+#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME |
+#else |
+#define SPLIT_FIRMWARE_NAME "unused" |
+#endif |
+ |
+static void split_firmware(struct mtd_info *master, struct mtd_part *part) |
+{ |
+} |
+ |
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part) |
+{ |
+ static int rootfs_found = 0; |
+ |
+ if (rootfs_found) |
+ return; |
+ |
+ if (IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE) && |
+ !strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) && |
+ !of_find_property(mtd_get_of_node(&part->mtd), "compatible", NULL)) |
+ split_firmware(master, part); |
+} |
+ |
/* |
* This function, given a master MTD object and a partition table, creates |
* and registers slave MTD objects which are bound to the master according to |
@@ -759,6 +787,7 @@ int add_mtd_partitions(struct mtd_info * |
mutex_unlock(&mtd_partitions_mutex); |
add_mtd_device(&slave->mtd); |
+ mtd_partition_split(master, slave); |
mtd_add_partition_attrs(slave); |
/* Look for subpartitions */ |
parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); |
/branches/18.06.1/target/linux/generic/pending-4.9/401-mtd-add-support-for-different-partition-parser-types.patch |
---|
@@ -0,0 +1,142 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: mtd: add support for different partition parser types |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/mtd/mtdpart.c | 56 ++++++++++++++++++++++++++++++++++++++++ |
include/linux/mtd/partitions.h | 11 ++++++++ |
2 files changed, 67 insertions(+) |
--- a/drivers/mtd/mtdpart.c |
+++ b/drivers/mtd/mtdpart.c |
@@ -56,6 +56,10 @@ struct mtd_part { |
}; |
static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part); |
+static int parse_mtd_partitions_by_type(struct mtd_info *master, |
+ enum mtd_parser_type type, |
+ const struct mtd_partition **pparts, |
+ struct mtd_part_parser_data *data); |
/* |
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve |
@@ -733,6 +737,36 @@ int mtd_del_partition(struct mtd_info *m |
} |
EXPORT_SYMBOL_GPL(mtd_del_partition); |
+static int |
+run_parsers_by_type(struct mtd_part *slave, enum mtd_parser_type type) |
+{ |
+ struct mtd_partition *parts; |
+ int nr_parts; |
+ int i; |
+ |
+ nr_parts = parse_mtd_partitions_by_type(&slave->mtd, type, (const struct mtd_partition **)&parts, |
+ NULL); |
+ if (nr_parts <= 0) |
+ return nr_parts; |
+ |
+ if (WARN_ON(!parts)) |
+ return 0; |
+ |
+ for (i = 0; i < nr_parts; i++) { |
+ /* adjust partition offsets */ |
+ parts[i].offset += slave->offset; |
+ |
+ mtd_add_partition(slave->parent, |
+ parts[i].name, |
+ parts[i].offset, |
+ parts[i].size); |
+ } |
+ |
+ kfree(parts); |
+ |
+ return nr_parts; |
+} |
+ |
#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME |
#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME |
#else |
@@ -1109,6 +1143,61 @@ void mtd_part_parser_cleanup(struct mtd_ |
} |
} |
+static struct mtd_part_parser * |
+get_partition_parser_by_type(enum mtd_parser_type type, |
+ struct mtd_part_parser *start) |
+{ |
+ struct mtd_part_parser *p, *ret = NULL; |
+ |
+ spin_lock(&part_parser_lock); |
+ |
+ p = list_prepare_entry(start, &part_parsers, list); |
+ if (start) |
+ mtd_part_parser_put(start); |
+ |
+ list_for_each_entry_continue(p, &part_parsers, list) { |
+ if (p->type == type && try_module_get(p->owner)) { |
+ ret = p; |
+ break; |
+ } |
+ } |
+ |
+ spin_unlock(&part_parser_lock); |
+ |
+ return ret; |
+} |
+ |
+static int parse_mtd_partitions_by_type(struct mtd_info *master, |
+ enum mtd_parser_type type, |
+ const struct mtd_partition **pparts, |
+ struct mtd_part_parser_data *data) |
+{ |
+ struct mtd_part_parser *prev = NULL; |
+ int ret = 0; |
+ |
+ while (1) { |
+ struct mtd_part_parser *parser; |
+ |
+ parser = get_partition_parser_by_type(type, prev); |
+ if (!parser) |
+ break; |
+ |
+ ret = (*parser->parse_fn)(master, pparts, data); |
+ |
+ if (ret > 0) { |
+ mtd_part_parser_put(parser); |
+ printk(KERN_NOTICE |
+ "%d %s partitions found on MTD device %s\n", |
+ ret, parser->name, master->name); |
+ break; |
+ } |
+ |
+ prev = parser; |
+ } |
+ |
+ return ret; |
+} |
+ |
int mtd_is_partition(const struct mtd_info *mtd) |
{ |
struct mtd_part *part; |
--- a/include/linux/mtd/partitions.h |
+++ b/include/linux/mtd/partitions.h |
@@ -73,6 +73,10 @@ struct mtd_part_parser_data { |
* Functions dealing with the various ways of partitioning the space |
*/ |
+enum mtd_parser_type { |
+ MTD_PARSER_TYPE_DEVICE = 0, |
+}; |
+ |
struct mtd_part_parser { |
struct list_head list; |
struct module *owner; |
@@ -81,6 +85,7 @@ struct mtd_part_parser { |
int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, |
struct mtd_part_parser_data *); |
void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); |
+ enum mtd_parser_type type; |
}; |
/* Container for passing around a set of parsed partitions */ |
/branches/18.06.1/target/linux/generic/pending-4.9/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch |
---|
@@ -0,0 +1,44 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: kernel/3.10: allow to use partition parsers for rootfs and firmware split |
lede-commit: 3b71cd94bc9517bc25267dccb393b07d4b54564e |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/mtd/mtdpart.c | 37 +++++++++++++++++++++++++++++++++++++ |
include/linux/mtd/partitions.h | 2 ++ |
2 files changed, 39 insertions(+) |
--- a/drivers/mtd/mtdpart.c |
+++ b/drivers/mtd/mtdpart.c |
@@ -775,6 +775,7 @@ run_parsers_by_type(struct mtd_part *sla |
static void split_firmware(struct mtd_info *master, struct mtd_part *part) |
{ |
+ run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE); |
} |
static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part) |
@@ -784,6 +785,12 @@ static void mtd_partition_split(struct m |
if (rootfs_found) |
return; |
+ if (!strcmp(part->mtd.name, "rootfs")) { |
+ run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS); |
+ |
+ rootfs_found = 1; |
+ } |
+ |
if (IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE) && |
!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) && |
!of_find_property(mtd_get_of_node(&part->mtd), "compatible", NULL)) |
--- a/include/linux/mtd/partitions.h |
+++ b/include/linux/mtd/partitions.h |
@@ -75,6 +75,8 @@ struct mtd_part_parser_data { |
enum mtd_parser_type { |
MTD_PARSER_TYPE_DEVICE = 0, |
+ MTD_PARSER_TYPE_ROOTFS, |
+ MTD_PARSER_TYPE_FIRMWARE, |
}; |
struct mtd_part_parser { |
/branches/18.06.1/target/linux/generic/pending-4.9/403-mtd-hook-mtdsplit-to-Kbuild.patch |
---|
@@ -0,0 +1,32 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: [PATCH] kernel/3.10: move squashfs check from rootfs split code into a separate file |
lede-commit: d89bea92b31b4e157a0fa438e75370f089f73427 |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/mtd/Kconfig | 2 ++ |
drivers/mtd/Makefile | 2 ++ |
2 files changed, 4 insertions(+) |
--- a/drivers/mtd/Kconfig |
+++ b/drivers/mtd/Kconfig |
@@ -27,6 +27,8 @@ config MTD_SPLIT_FIRMWARE_NAME |
depends on MTD_SPLIT_FIRMWARE |
default "firmware" |
+source "drivers/mtd/mtdsplit/Kconfig" |
+ |
endmenu |
config MTD_TESTS |
--- a/drivers/mtd/Makefile |
+++ b/drivers/mtd/Makefile |
@@ -6,6 +6,8 @@ |
obj-$(CONFIG_MTD) += mtd.o |
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o |
+obj-$(CONFIG_MTD_SPLIT) += mtdsplit/ |
+ |
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o |
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o |
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o |
/branches/18.06.1/target/linux/generic/pending-4.9/404-mtd-add-more-helper-functions.patch |
---|
@@ -0,0 +1,76 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: kernel/3.10: add separate rootfs partition parser |
lede-commit: daec7ad7688415156e2730e401503d09bd3acf91 |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/mtd/mtdpart.c | 29 +++++++++++++++++++++++++++++ |
include/linux/mtd/mtd.h | 18 ++++++++++++++++++ |
include/linux/mtd/partitions.h | 2 ++ |
3 files changed, 49 insertions(+) |
--- a/drivers/mtd/mtdpart.c |
+++ b/drivers/mtd/mtdpart.c |
@@ -1222,6 +1222,24 @@ int mtd_is_partition(const struct mtd_in |
} |
EXPORT_SYMBOL_GPL(mtd_is_partition); |
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd) |
+{ |
+ if (!mtd_is_partition(mtd)) |
+ return (struct mtd_info *)mtd; |
+ |
+ return mtd_to_part(mtd)->parent; |
+} |
+EXPORT_SYMBOL_GPL(mtdpart_get_master); |
+ |
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd) |
+{ |
+ if (!mtd_is_partition(mtd)) |
+ return 0; |
+ |
+ return mtd_to_part(mtd)->offset; |
+} |
+EXPORT_SYMBOL_GPL(mtdpart_get_offset); |
+ |
/* Returns the size of the entire flash chip */ |
uint64_t mtd_get_device_size(const struct mtd_info *mtd) |
{ |
--- a/include/linux/mtd/mtd.h |
+++ b/include/linux/mtd/mtd.h |
@@ -485,6 +485,24 @@ static inline uint32_t mtd_mod_by_eb(uin |
return do_div(sz, mtd->erasesize); |
} |
+static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd) |
+{ |
+ if (mtd_mod_by_eb(sz, mtd) == 0) |
+ return sz; |
+ |
+ /* Round up to next erase block */ |
+ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize; |
+} |
+ |
+static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd) |
+{ |
+ if (mtd_mod_by_eb(sz, mtd) == 0) |
+ return sz; |
+ |
+ /* Round down to the start of the current erase block */ |
+ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize; |
+} |
+ |
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) |
{ |
if (mtd->writesize_shift) |
--- a/include/linux/mtd/partitions.h |
+++ b/include/linux/mtd/partitions.h |
@@ -116,6 +116,8 @@ int mtd_is_partition(const struct mtd_in |
int mtd_add_partition(struct mtd_info *master, const char *name, |
long long offset, long long length); |
int mtd_del_partition(struct mtd_info *master, int partno); |
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd); |
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd); |
uint64_t mtd_get_device_size(const struct mtd_info *mtd); |
#endif |
/branches/18.06.1/target/linux/generic/pending-4.9/411-mtd-partial_eraseblock_write.patch |
---|
@@ -0,0 +1,154 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: mtd: implement write support for partitions covering only a part of an eraseblock (buffer data that would otherwise be erased) |
lede-commit: 87a8e8ac1067f58ba831c4aae443f3655c31cd80 |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/mtd/mtdpart.c | 90 ++++++++++++++++++++++++++++++++++++++++++++----- |
include/linux/mtd/mtd.h | 4 +++ |
2 files changed, 85 insertions(+), 9 deletions(-) |
--- a/drivers/mtd/mtdpart.c |
+++ b/drivers/mtd/mtdpart.c |
@@ -37,6 +37,8 @@ |
#include "mtdcore.h" |
#include "mtdsplit/mtdsplit.h" |
+#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */ |
+ |
/* Our partition linked list */ |
static LIST_HEAD(mtd_partitions); |
static DEFINE_MUTEX(mtd_partitions_mutex); |
@@ -246,13 +248,61 @@ static int part_erase(struct mtd_info *m |
struct mtd_part *part = mtd_to_part(mtd); |
int ret; |
+ |
+ instr->partial_start = false; |
+ if (mtd->flags & MTD_ERASE_PARTIAL) { |
+ size_t readlen = 0; |
+ u64 mtd_ofs; |
+ |
+ instr->erase_buf = kmalloc(part->parent->erasesize, GFP_ATOMIC); |
+ if (!instr->erase_buf) |
+ return -ENOMEM; |
+ |
+ mtd_ofs = part->offset + instr->addr; |
+ instr->erase_buf_ofs = do_div(mtd_ofs, part->parent->erasesize); |
+ |
+ if (instr->erase_buf_ofs > 0) { |
+ instr->addr -= instr->erase_buf_ofs; |
+ ret = mtd_read(part->parent, |
+ instr->addr + part->offset, |
+ part->parent->erasesize, |
+ &readlen, instr->erase_buf); |
+ |
+ instr->len += instr->erase_buf_ofs; |
+ instr->partial_start = true; |
+ } else { |
+ mtd_ofs = part->offset + part->mtd.size; |
+ instr->erase_buf_ofs = part->parent->erasesize - |
+ do_div(mtd_ofs, part->parent->erasesize); |
+ |
+ if (instr->erase_buf_ofs > 0) { |
+ instr->len += instr->erase_buf_ofs; |
+ ret = mtd_read(part->parent, |
+ part->offset + instr->addr + |
+ instr->len - part->parent->erasesize, |
+ part->parent->erasesize, &readlen, |
+ instr->erase_buf); |
+ } else { |
+ ret = 0; |
+ } |
+ } |
+ if (ret < 0) { |
+ kfree(instr->erase_buf); |
+ return ret; |
+ } |
+ |
+ } |
+ |
instr->addr += part->offset; |
ret = part->parent->_erase(part->parent, instr); |
if (ret) { |
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) |
instr->fail_addr -= part->offset; |
instr->addr -= part->offset; |
+ if (mtd->flags & MTD_ERASE_PARTIAL) |
+ kfree(instr->erase_buf); |
} |
+ |
return ret; |
} |
@@ -260,6 +310,25 @@ void mtd_erase_callback(struct erase_inf |
{ |
if (instr->mtd->_erase == part_erase) { |
struct mtd_part *part = mtd_to_part(instr->mtd); |
+ size_t wrlen = 0; |
+ |
+ if (instr->mtd->flags & MTD_ERASE_PARTIAL) { |
+ if (instr->partial_start) { |
+ part->parent->_write(part->parent, |
+ instr->addr, instr->erase_buf_ofs, |
+ &wrlen, instr->erase_buf); |
+ instr->addr += instr->erase_buf_ofs; |
+ } else { |
+ instr->len -= instr->erase_buf_ofs; |
+ part->parent->_write(part->parent, |
+ instr->addr + instr->len, |
+ instr->erase_buf_ofs, &wrlen, |
+ instr->erase_buf + |
+ part->parent->erasesize - |
+ instr->erase_buf_ofs); |
+ } |
+ kfree(instr->erase_buf); |
+ } |
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) |
instr->fail_addr -= part->offset; |
@@ -566,19 +635,22 @@ static struct mtd_part *allocate_partiti |
remainder = do_div(tmp, wr_alignment); |
if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { |
/* Doesn't start on a boundary of major erase size */ |
- /* FIXME: Let it be writable if it is on a boundary of |
- * _minor_ erase size though */ |
- slave->mtd.flags &= ~MTD_WRITEABLE; |
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", |
- part->name); |
+ slave->mtd.flags |= MTD_ERASE_PARTIAL; |
+ if (((u32)slave->mtd.size) > parent->erasesize) |
+ slave->mtd.flags &= ~MTD_WRITEABLE; |
+ else |
+ slave->mtd.erasesize = slave->mtd.size; |
} |
- tmp = slave->mtd.size; |
+ tmp = slave->offset + slave->mtd.size; |
remainder = do_div(tmp, wr_alignment); |
if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { |
- slave->mtd.flags &= ~MTD_WRITEABLE; |
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", |
- part->name); |
+ slave->mtd.flags |= MTD_ERASE_PARTIAL; |
+ |
+ if ((u32)slave->mtd.size > parent->erasesize) |
+ slave->mtd.flags &= ~MTD_WRITEABLE; |
+ else |
+ slave->mtd.erasesize = slave->mtd.size; |
} |
mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops); |
--- a/include/linux/mtd/mtd.h |
+++ b/include/linux/mtd/mtd.h |
@@ -55,6 +55,10 @@ struct erase_info { |
u_long priv; |
u_char state; |
struct erase_info *next; |
+ |
+ u8 *erase_buf; |
+ u32 erase_buf_ofs; |
+ bool partial_start; |
}; |
struct mtd_erase_region_info { |
/branches/18.06.1/target/linux/generic/pending-4.9/412-mtd-partial_eraseblock_unlock.patch |
---|
@@ -0,0 +1,40 @@ |
From: Tim Harvey <tharvey@gateworks.com> |
Subject: mtd: allow partial block unlock |
This allows sysupgrade for devices such as the Gateworks Avila/Cambria |
product families based on the ixp4xx using the redboot bootloader with |
combined FIS directory and RedBoot config partitions on larger FLASH |
devices with larger eraseblocks. |
This second iteration of this patch addresses previous issues: |
- whitespace breakage fixed |
- unlock in all scenarios |
- simplification and fix logic bug |
[john@phrozen.org: this should be moved to the ixp4xx folder] |
Signed-off-by: Tim Harvey <tharvey@gateworks.com> |
--- |
drivers/mtd/mtdpart.c | 11 ++++++++++- |
1 file changed, 10 insertions(+), 1 deletion(-) |
--- a/drivers/mtd/mtdpart.c |
+++ b/drivers/mtd/mtdpart.c |
@@ -348,7 +348,16 @@ static int part_lock(struct mtd_info *mt |
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
{ |
struct mtd_part *part = mtd_to_part(mtd); |
- return part->parent->_unlock(part->parent, ofs + part->offset, len); |
+ |
+ ofs += part->offset; |
+ |
+ if (mtd->flags & MTD_ERASE_PARTIAL) { |
+ /* round up len to next erasesize and round down offset to prev block */ |
+ len = (mtd_div_by_eb(len, part->parent) + 1) * part->parent->erasesize; |
+ ofs &= ~(part->parent->erasesize - 1); |
+ } |
+ |
+ return part->parent->_unlock(part->parent, ofs, len); |
} |
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
/branches/18.06.1/target/linux/generic/pending-4.9/419-mtd-redboot-add-of_match_table-with-DT-binding.patch |
---|
@@ -0,0 +1,39 @@ |
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> |
Subject: [PATCH] mtd: redboot: add of_match_table with DT binding |
MIME-Version: 1.0 |
Content-Type: text/plain; charset=UTF-8 |
Content-Transfer-Encoding: 8bit |
This allows parsing RedBoot compatible partitions for properly described |
flash device in DT. |
Signed-off-by: Rafał Miłecki <rafal@milecki.pl> |
--- |
--- a/drivers/mtd/redboot.c |
+++ b/drivers/mtd/redboot.c |
@@ -29,6 +29,7 @@ |
#include <linux/mtd/mtd.h> |
#include <linux/mtd/partitions.h> |
#include <linux/module.h> |
+#include <linux/mod_devicetable.h> |
struct fis_image_desc { |
unsigned char name[16]; // Null terminated name |
@@ -289,9 +290,16 @@ static int parse_redboot_partitions(stru |
return ret; |
} |
+static const struct of_device_id redboot_parser_of_match_table[] = { |
+ { .compatible = "ecoscentric,redboot-fis-partitions" }, |
+ {}, |
+}; |
+MODULE_DEVICE_TABLE(of, redboot_parser_of_match_table); |
+ |
static struct mtd_part_parser redboot_parser = { |
.parse_fn = parse_redboot_partitions, |
.name = "RedBoot", |
+ .of_match_table = redboot_parser_of_match_table, |
}; |
module_mtd_part_parser(redboot_parser); |
/branches/18.06.1/target/linux/generic/pending-4.9/420-mtd-redboot_space.patch |
---|
@@ -0,0 +1,41 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: add patch for including unpartitioned space in the rootfs partition for redboot devices (if applicable) |
[john@phrozen.org: used by ixp and others] |
lede-commit: 394918851f84e4d00fa16eb900e7700e95091f00 |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/mtd/redboot.c | 19 +++++++++++++------ |
1 file changed, 13 insertions(+), 6 deletions(-) |
--- a/drivers/mtd/redboot.c |
+++ b/drivers/mtd/redboot.c |
@@ -266,14 +266,21 @@ static int parse_redboot_partitions(stru |
#endif |
names += strlen(names)+1; |
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED |
if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) { |
- i++; |
- parts[i].offset = parts[i-1].size + parts[i-1].offset; |
- parts[i].size = fl->next->img->flash_base - parts[i].offset; |
- parts[i].name = nullname; |
- } |
+ if (!strcmp(parts[i].name, "rootfs")) { |
+ parts[i].size = fl->next->img->flash_base; |
+ parts[i].size &= ~(master->erasesize - 1); |
+ parts[i].size -= parts[i].offset; |
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED |
+ nrparts--; |
+ } else { |
+ i++; |
+ parts[i].offset = parts[i-1].size + parts[i-1].offset; |
+ parts[i].size = fl->next->img->flash_base - parts[i].offset; |
+ parts[i].name = nullname; |
#endif |
+ } |
+ } |
tmp_fl = fl; |
fl = fl->next; |
kfree(tmp_fl); |
/branches/18.06.1/target/linux/generic/pending-4.9/430-mtd-add-myloader-partition-parser.patch |
---|
@@ -0,0 +1,47 @@ |
From: Florian Fainelli <f.fainelli@gmail.com> |
Subject: Add myloader partition table parser |
[john@phrozen.org: should be upstreamable] |
lede-commit: d8bf22859b51faa09d22c056fe221a45d2f7a3b8 |
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> |
--- |
drivers/mtd/Kconfig | 16 ++++++++++++++++ |
drivers/mtd/Makefile | 1 + |
2 files changed, 17 insertions(+) |
--- a/drivers/mtd/Kconfig |
+++ b/drivers/mtd/Kconfig |
@@ -178,6 +178,22 @@ menu "Partition parsers" |
source "drivers/mtd/parsers/Kconfig" |
endmenu |
+config MTD_MYLOADER_PARTS |
+ tristate "MyLoader partition parsing" |
+ depends on ADM5120 || ATH25 || ATH79 |
+ ---help--- |
+ MyLoader is a bootloader which allows the user to define partitions |
+ in flash devices, by putting a table in the second erase block |
+ on the device, similar to a partition table. This table gives the |
+ offsets and lengths of the user defined partitions. |
+ |
+ If you need code which can detect and parse these tables, and |
+ register MTD 'partitions' corresponding to each image detected, |
+ enable this option. |
+ |
+ You will still need the parsing functions to be called by the driver |
+ for your particular device. It won't happen automatically. |
+ |
comment "User Modules And Translation Layers" |
# |
--- a/drivers/mtd/Makefile |
+++ b/drivers/mtd/Makefile |
@@ -15,6 +15,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o |
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o |
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o |
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o |
+obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o |
obj-y += parsers/ |
# 'Users' - code which presents functionality to userspace. |
/branches/18.06.1/target/linux/generic/pending-4.9/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch |
---|
@@ -0,0 +1,68 @@ |
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com> |
Subject: [PATCH] mtd: bcm47xxpart: check for bad blocks when calculating offsets |
Signed-off-by: Rafał Miłecki <zajec5@gmail.com> |
--- |
--- a/drivers/mtd/parsers/parser_trx.c |
+++ b/drivers/mtd/parsers/parser_trx.c |
@@ -30,6 +30,33 @@ struct trx_header { |
uint32_t offset[3]; |
} __packed; |
+/* |
+ * Calculate real end offset (address) for a given amount of data. It checks |
+ * all blocks skipping bad ones. |
+ */ |
+static size_t parser_trx_real_offset(struct mtd_info *mtd, size_t bytes) |
+{ |
+ size_t real_offset = 0; |
+ |
+ if (mtd_block_isbad(mtd, real_offset)) |
+ pr_warn("Base offset shouldn't be at bad block"); |
+ |
+ while (bytes >= mtd->erasesize) { |
+ bytes -= mtd->erasesize; |
+ real_offset += mtd->erasesize; |
+ while (mtd_block_isbad(mtd, real_offset)) { |
+ real_offset += mtd->erasesize; |
+ |
+ if (real_offset >= mtd->size) |
+ return real_offset - mtd->erasesize; |
+ } |
+ } |
+ |
+ real_offset += bytes; |
+ |
+ return real_offset; |
+} |
+ |
static const char *parser_trx_data_part_name(struct mtd_info *master, |
size_t offset) |
{ |
@@ -84,21 +111,21 @@ static int parser_trx_parse(struct mtd_i |
if (trx.offset[2]) { |
part = &parts[curr_part++]; |
part->name = "loader"; |
- part->offset = trx.offset[i]; |
+ part->offset = parser_trx_real_offset(mtd, trx.offset[i]); |
i++; |
} |
if (trx.offset[i]) { |
part = &parts[curr_part++]; |
part->name = "linux"; |
- part->offset = trx.offset[i]; |
+ part->offset = parser_trx_real_offset(mtd, trx.offset[i]); |
i++; |
} |
if (trx.offset[i]) { |
part = &parts[curr_part++]; |
- part->name = parser_trx_data_part_name(mtd, trx.offset[i]); |
- part->offset = trx.offset[i]; |
+ part->offset = parser_trx_real_offset(mtd, trx.offset[i]); |
+ part->name = parser_trx_data_part_name(mtd, part->offset); |
i++; |
} |
/branches/18.06.1/target/linux/generic/pending-4.9/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch |
---|
@@ -0,0 +1,37 @@ |
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com> |
Subject: mtd: bcm47xxpart: detect T_Meter partition |
It can be found on many Netgear devices. It consists of many 0x30 blocks |
starting with 4D 54. |
Signed-off-by: Rafał Miłecki <zajec5@gmail.com> |
--- |
drivers/mtd/bcm47xxpart.c | 10 ++++++++++ |
1 file changed, 10 insertions(+) |
--- a/drivers/mtd/bcm47xxpart.c |
+++ b/drivers/mtd/bcm47xxpart.c |
@@ -40,6 +40,7 @@ |
#define NVRAM_HEADER 0x48534C46 /* FLSH */ |
#define POT_MAGIC1 0x54544f50 /* POTT */ |
#define POT_MAGIC2 0x504f /* OP */ |
+#define T_METER_MAGIC 0x4D540000 /* MT */ |
#define ML_MAGIC1 0x39685a42 |
#define ML_MAGIC2 0x26594131 |
#define TRX_MAGIC 0x30524448 |
@@ -183,6 +184,15 @@ static int bcm47xxpart_parse(struct mtd_ |
MTD_WRITEABLE); |
continue; |
} |
+ |
+ /* T_Meter */ |
+ if ((le32_to_cpu(buf[0x000 / 4]) & 0xFFFF0000) == T_METER_MAGIC && |
+ (le32_to_cpu(buf[0x030 / 4]) & 0xFFFF0000) == T_METER_MAGIC && |
+ (le32_to_cpu(buf[0x060 / 4]) & 0xFFFF0000) == T_METER_MAGIC) { |
+ bcm47xxpart_add_part(&parts[curr_part++], "T_Meter", offset, |
+ MTD_WRITEABLE); |
+ continue; |
+ } |
/* TRX */ |
if (buf[0x000 / 4] == TRX_MAGIC) { |
/branches/18.06.1/target/linux/generic/pending-4.9/440-block2mtd_init.patch |
---|
@@ -0,0 +1,116 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: block2mtd |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/mtd/devices/block2mtd.c | 30 ++++++++++++++++++++---------- |
1 file changed, 20 insertions(+), 10 deletions(-) |
--- a/drivers/mtd/devices/block2mtd.c |
+++ b/drivers/mtd/devices/block2mtd.c |
@@ -26,6 +26,7 @@ |
#include <linux/list.h> |
#include <linux/init.h> |
#include <linux/mtd/mtd.h> |
+#include <linux/mtd/partitions.h> |
#include <linux/mutex.h> |
#include <linux/mount.h> |
#include <linux/slab.h> |
@@ -219,7 +220,7 @@ static void block2mtd_free_device(struct |
static struct block2mtd_dev *add_device(char *devname, int erase_size, |
- int timeout) |
+ const char *mtdname, int timeout) |
{ |
#ifndef MODULE |
int i; |
@@ -227,6 +228,7 @@ static struct block2mtd_dev *add_device( |
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; |
struct block_device *bdev = ERR_PTR(-ENODEV); |
struct block2mtd_dev *dev; |
+ struct mtd_partition *part; |
char *name; |
if (!devname) |
@@ -283,13 +285,16 @@ static struct block2mtd_dev *add_device( |
/* Setup the MTD structure */ |
/* make the name contain the block device in */ |
- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname); |
+ if (!mtdname) |
+ mtdname = devname; |
+ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL); |
if (!name) |
goto err_destroy_mutex; |
+ strcpy(name, mtdname); |
dev->mtd.name = name; |
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; |
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1); |
dev->mtd.erasesize = erase_size; |
dev->mtd.writesize = 1; |
dev->mtd.writebufsize = PAGE_SIZE; |
@@ -302,7 +307,11 @@ static struct block2mtd_dev *add_device( |
dev->mtd.priv = dev; |
dev->mtd.owner = THIS_MODULE; |
- if (mtd_device_register(&dev->mtd, NULL, 0)) { |
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL); |
+ part->name = name; |
+ part->offset = 0; |
+ part->size = dev->mtd.size; |
+ if (mtd_device_register(&dev->mtd, part, 1)) { |
/* Device didn't get added, so free the entry */ |
goto err_destroy_mutex; |
} |
@@ -310,8 +319,7 @@ static struct block2mtd_dev *add_device( |
list_add(&dev->list, &blkmtd_device_list); |
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n", |
dev->mtd.index, |
- dev->mtd.name + strlen("block2mtd: "), |
- dev->mtd.erasesize >> 10, dev->mtd.erasesize); |
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize); |
return dev; |
err_destroy_mutex: |
@@ -384,7 +392,7 @@ static int block2mtd_setup2(const char * |
/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */ |
char buf[80 + 12 + 80 + 8]; |
char *str = buf; |
- char *token[2]; |
+ char *token[3]; |
char *name; |
size_t erase_size = PAGE_SIZE; |
unsigned long timeout = MTD_DEFAULT_TIMEOUT; |
@@ -398,7 +406,7 @@ static int block2mtd_setup2(const char * |
strcpy(str, val); |
kill_final_newline(str); |
- for (i = 0; i < 2; i++) |
+ for (i = 0; i < 3; i++) |
token[i] = strsep(&str, ","); |
if (str) { |
@@ -424,8 +432,10 @@ static int block2mtd_setup2(const char * |
return 0; |
} |
} |
+ if (token[2] && (strlen(token[2]) + 1 > 80)) |
+ pr_err("mtd device name too long\n"); |
- add_device(name, erase_size, timeout); |
+ add_device(name, erase_size, token[2], timeout); |
return 0; |
} |
@@ -459,7 +469,7 @@ static int block2mtd_setup(const char *v |
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200); |
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\""); |
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\""); |
static int __init block2mtd_init(void) |
{ |
/branches/18.06.1/target/linux/generic/pending-4.9/441-block2mtd_probe.patch |
---|
@@ -0,0 +1,47 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: block2mtd |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/mtd/devices/block2mtd.c | 9 ++++++--- |
1 file changed, 6 insertions(+), 3 deletions(-) |
--- a/drivers/mtd/devices/block2mtd.c |
+++ b/drivers/mtd/devices/block2mtd.c |
@@ -392,7 +392,7 @@ static int block2mtd_setup2(const char * |
/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */ |
char buf[80 + 12 + 80 + 8]; |
char *str = buf; |
- char *token[3]; |
+ char *token[4]; |
char *name; |
size_t erase_size = PAGE_SIZE; |
unsigned long timeout = MTD_DEFAULT_TIMEOUT; |
@@ -406,7 +406,7 @@ static int block2mtd_setup2(const char * |
strcpy(str, val); |
kill_final_newline(str); |
- for (i = 0; i < 3; i++) |
+ for (i = 0; i < 4; i++) |
token[i] = strsep(&str, ","); |
if (str) { |
@@ -435,6 +435,9 @@ static int block2mtd_setup2(const char * |
if (token[2] && (strlen(token[2]) + 1 > 80)) |
pr_err("mtd device name too long\n"); |
+ if (token[3] && kstrtoul(token[3], 0, &timeout)) |
+ pr_err("invalid timeout\n"); |
+ |
add_device(name, erase_size, token[2], timeout); |
return 0; |
@@ -469,7 +472,7 @@ static int block2mtd_setup(const char *v |
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200); |
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\""); |
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]]\""); |
static int __init block2mtd_init(void) |
{ |
/branches/18.06.1/target/linux/generic/pending-4.9/450-mtd-m25p80-allow-fallback-from-spi_flash_read-to-reg.patch |
---|
@@ -0,0 +1,36 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Date: Fri, 23 Feb 2018 17:12:16 +0100 |
Subject: [PATCH] mtd: m25p80: allow fallback from spi_flash_read to regular |
SPI transfer |
Some flash controllers, e.g. on the ath79 platform can support a fast |
flash read via memory mapping, but only if the flash chip is in |
3-byte address mode. |
Since spi_flash_read_supported does not have access to the same data as |
spi_flash_read, the driver can't detect an unsupported call until m25p80 |
has decided to use spi_flash_read. |
Allow the driver to indicate a fallback to a regular SPI transfer by |
returning -EOPNOTSUPP |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/drivers/mtd/devices/m25p80.c |
+++ b/drivers/mtd/devices/m25p80.c |
@@ -155,9 +155,11 @@ static ssize_t m25p80_read(struct spi_no |
msg.data_nbits = m25p80_rx_nbits(nor); |
ret = spi_flash_read(spi, &msg); |
- if (ret < 0) |
- return ret; |
- return msg.retlen; |
+ if (ret != -EOPNOTSUPP) { |
+ if (ret < 0) |
+ return ret; |
+ return msg.retlen; |
+ } |
} |
spi_message_init(&m); |
/branches/18.06.1/target/linux/generic/pending-4.9/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch |
---|
@@ -0,0 +1,25 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: disable cfi cmdset 0002 erase suspend |
on some platforms, erase suspend leads to data corruption and lockups when write |
ops collide with erase ops. this has been observed on the buffalo wzr-hp-g300nh. |
rather than play whack-a-mole with a hard to reproduce issue on a variety of devices, |
simply disable erase suspend, as it will usually not produce any useful gain on |
the small filesystems used on embedded hardware. |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/mtd/chips/cfi_cmdset_0002.c | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/drivers/mtd/chips/cfi_cmdset_0002.c |
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c |
@@ -807,7 +807,7 @@ static int get_chip(struct map_info *map |
return 0; |
case FL_ERASING: |
- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || |
+ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) || |
!(mode == FL_READY || mode == FL_POINT || |
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) |
goto sleep; |
/branches/18.06.1/target/linux/generic/pending-4.9/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch |
---|
@@ -0,0 +1,17 @@ |
From: George Kashperko <george@znau.edu.ua> |
Subject: Issue map read after Write Buffer Load command to ensure chip is ready to receive data. |
Signed-off-by: George Kashperko <george@znau.edu.ua> |
--- |
drivers/mtd/chips/cfi_cmdset_0002.c | 1 + |
1 file changed, 1 insertion(+) |
--- a/drivers/mtd/chips/cfi_cmdset_0002.c |
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c |
@@ -1829,6 +1829,7 @@ static int __xipram do_write_buffer(stru |
/* Write Buffer Load */ |
map_write(map, CMD(0x25), cmd_adr); |
+ (void) map_read(map, cmd_adr); |
chip->state = FL_WRITING_TO_BUFFER; |
/branches/18.06.1/target/linux/generic/pending-4.9/465-m25p80-mx-disable-software-protection.patch |
---|
@@ -0,0 +1,18 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: Disable software protection bits for Macronix flashes. |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/mtd/spi-nor/spi-nor.c | 1 + |
1 file changed, 1 insertion(+) |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -1601,6 +1601,7 @@ int spi_nor_scan(struct spi_nor *nor, co |
if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || |
JEDEC_MFR(info) == SNOR_MFR_INTEL || |
+ JEDEC_MFR(info) == SNOR_MFR_MACRONIX || |
JEDEC_MFR(info) == SNOR_MFR_SST || |
info->flags & SPI_NOR_HAS_LOCK) { |
write_enable(nor); |
/branches/18.06.1/target/linux/generic/pending-4.9/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch |
---|
@@ -0,0 +1,37 @@ |
From: Matthias Schiffer <mschiffer@universe-factory.net> |
Date: Tue, 9 Jan 2018 20:41:48 +0100 |
Subject: [PATCH] Revert "mtd: spi-nor: fix Spansion regressions (aliased with |
Winbond)" |
This reverts commit 67b9bcd36906e12a15ffec19463afbbd6a41660e. |
The underlying issue breaking Spansion flash has been fixed with "mtd: spi-nor: |
wait until lock/unlock operations are ready" and "mtd: spi-nor: wait for SR_WIP |
to clear on initial unlock", so we can support unlocking for Winbond flash |
again. |
Signed-off-by: Matthias Schiffer <mschiffer@universe-factory.net> |
--- |
drivers/mtd/spi-nor/spi-nor.c | 4 +++- |
1 file changed, 3 insertions(+), 1 deletion(-) |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -1603,6 +1603,7 @@ int spi_nor_scan(struct spi_nor *nor, co |
JEDEC_MFR(info) == SNOR_MFR_INTEL || |
JEDEC_MFR(info) == SNOR_MFR_MACRONIX || |
JEDEC_MFR(info) == SNOR_MFR_SST || |
+ JEDEC_MFR(info) == SNOR_MFR_WINBOND || |
info->flags & SPI_NOR_HAS_LOCK) { |
write_enable(nor); |
write_sr(nor, 0); |
@@ -1621,7 +1622,8 @@ int spi_nor_scan(struct spi_nor *nor, co |
/* NOR protection support for STmicro/Micron chips and similar */ |
if (JEDEC_MFR(info) == SNOR_MFR_MICRON || |
- info->flags & SPI_NOR_HAS_LOCK) { |
+ JEDEC_MFR(info) == SNOR_MFR_WINBOND || |
+ info->flags & SPI_NOR_HAS_LOCK) { |
nor->flash_lock = stm_lock; |
nor->flash_unlock = stm_unlock; |
nor->flash_is_locked = stm_is_locked; |
/branches/18.06.1/target/linux/generic/pending-4.9/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch |
---|
@@ -0,0 +1,56 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Date: Sat, 4 Nov 2017 07:40:23 +0100 |
Subject: [PATCH] mtd: spi-nor: support limiting 4K sectors support based on |
flash size |
Some devices need 4K sectors to be able to deal with small flash chips. |
For instance, w25x05 is 64 KiB in size, and without 4K sectors, the |
entire chip is just one erase block. |
On bigger flash chip sizes, using 4K sectors can significantly slow down |
many operations, including using a writable filesystem. There are several |
platforms where it makes sense to use a single kernel on both kinds of |
devices. |
To support this properly, allow configuring an upper flash chip size |
limit for 4K sectors support. |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
--- a/drivers/mtd/spi-nor/Kconfig |
+++ b/drivers/mtd/spi-nor/Kconfig |
@@ -29,6 +29,17 @@ config MTD_SPI_NOR_USE_4K_SECTORS |
Please note that some tools/drivers/filesystems may not work with |
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum). |
+config MTD_SPI_NOR_USE_4K_SECTORS_LIMIT |
+ int "Maximum flash chip size to use 4K sectors on (in KiB)" |
+ depends on MTD_SPI_NOR_USE_4K_SECTORS |
+ default "4096" |
+ help |
+ There are many flash chips that support 4K sectors, but are so large |
+ that using them significantly slows down writing large amounts of |
+ data or using a writable filesystem. |
+ Any flash chip larger than the size specified in this option will |
+ not use 4K sectors. |
+ |
config SPI_ATMEL_QUADSPI |
tristate "Atmel Quad SPI Controller" |
depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110) |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -1648,10 +1648,12 @@ int spi_nor_scan(struct spi_nor *nor, co |
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS |
/* prefer "small sector" erase if possible */ |
- if (info->flags & SECT_4K) { |
+ if ((info->flags & SECT_4K) && (mtd->size <= |
+ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) { |
nor->erase_opcode = SPINOR_OP_BE_4K; |
mtd->erasesize = 4096; |
- } else if (info->flags & SECT_4K_PMC) { |
+ } else if ((info->flags & SECT_4K_PMC) && (mtd->size <= |
+ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) { |
nor->erase_opcode = SPINOR_OP_BE_4K_PMC; |
mtd->erasesize = 4096; |
} else |
/branches/18.06.1/target/linux/generic/pending-4.9/475-mtd-spi-nor-Add-Winbond-w25q128jv-support.patch |
---|
@@ -0,0 +1,34 @@ |
From: Robert Marko <robimarko@gmail.com> |
To: linux-mtd@lists.infradead.org |
Subject: mtd: spi-nor: Add Winbond w25q128jv support |
Date: Mon, 25 Jun 2018 13:17:48 +0200 |
Datasheet: |
http://www.winbond.com/resource-files/w25q128jv%20revf%2003272018%20plus.pdf |
Testing done on Mikrotik Routerboard wAP R board. |
It does not support Dual or Quad modes. |
Signed-off-by: Robert Marko <robimarko@gmail.com> |
--- |
Changes in v2: |
- Correct the title |
--- |
drivers/mtd/spi-nor/spi-nor.c | 5 +++++ |
1 file changed, 5 insertions(+) |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -1152,6 +1152,11 @@ static const struct flash_info spi_nor_i |
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) |
}, |
+ { |
+ "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256, |
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) |
+ }, |
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, |
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, |
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, |
/branches/18.06.1/target/linux/generic/pending-4.9/476-mtd-spi-nor-add-eon-en25q128.patch |
---|
@@ -0,0 +1,18 @@ |
From: Piotr Dymacz <pepe2k@gmail.com> |
Subject: kernel/mtd: add support for EON EN25Q128 |
Signed-off-by: Piotr Dymacz <pepe2k@gmail.com> |
--- |
drivers/mtd/spi-nor/spi-nor.c | 1 + |
1 file changed, 1 insertion(+) |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -954,6 +954,7 @@ static const struct flash_info spi_nor_i |
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, |
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, |
+ { "en25q128", INFO(0x1c3018, 0, 64 * 1024, 256, SECT_4K) }, |
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, |
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, |
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, |
/branches/18.06.1/target/linux/generic/pending-4.9/477-mtd-add-spi-nor-add-mx25u3235f.patch |
---|
@@ -0,0 +1,18 @@ |
From: André Valentin <avalentin@marcant.net> |
Subject: linux/mtd: add id for mx25u3235f needed by ZyXEL NBG6817 |
Signed-off-by: André Valentin <avalentin@marcant.net> |
--- |
drivers/mtd/spi-nor/spi-nor.c | 1 + |
1 file changed, 1 insertion(+) |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -1020,6 +1020,7 @@ static const struct flash_info spi_nor_i |
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, |
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, |
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, |
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, 0) }, |
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, |
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, |
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, |
/branches/18.06.1/target/linux/generic/pending-4.9/478-mtd-spi-nor-Add-support-for-XM25QH64A-and-XM25QH128A.patch |
---|
@@ -0,0 +1,30 @@ |
From b02f3405c935a28200db26b63e42086057565cf4 Mon Sep 17 00:00:00 2001 |
From: Hauke Mehrtens <hauke@hauke-m.de> |
Date: Sat, 31 Mar 2018 20:09:54 +0200 |
Subject: [PATCH] mtd: spi-nor: Add support for XM25QH64A and XM25QH128A |
These devices are produced by Wuhan Xinxin Semiconductor Manufacturing |
Corp. (XMC) and found on some routers from Chinese manufacturers. |
The data sheets can be found here: |
http://www.xmcwh.com/Uploads/2018-03-01/5a9799e4cb355.pdf |
http://www.xmcwh.com/Uploads/2018-02-05/5a77e6dbe968b.pdf |
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> |
--- |
drivers/mtd/spi-nor/spi-nor.c | 4 ++++ |
1 file changed, 4 insertions(+) |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -1177,6 +1177,10 @@ static const struct flash_info spi_nor_i |
{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) }, |
{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) }, |
{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) }, |
+ |
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */ |
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, |
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, |
{ }, |
}; |
/branches/18.06.1/target/linux/generic/pending-4.9/479-mtd-spi-nor-add-eon-en25qh32.patch |
---|
@@ -0,0 +1,10 @@ |
--- a/drivers/mtd/spi-nor/spi-nor.c |
+++ b/drivers/mtd/spi-nor/spi-nor.c |
@@ -955,6 +955,7 @@ static const struct flash_info spi_nor_i |
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, |
{ "en25q128", INFO(0x1c3018, 0, 64 * 1024, 256, SECT_4K) }, |
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) }, |
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, |
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, |
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, |
/branches/18.06.1/target/linux/generic/pending-4.9/480-mtd-set-rootfs-to-be-root-dev.patch |
---|
@@ -0,0 +1,38 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: kernel/3.1[02]: move MTD root device setup code to mtdcore |
The current code only allows to automatically set |
root device on MTD partitions. Move the code to MTD |
core to allow to use it with all MTD devices. |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/mtd/mtdcore.c | 10 ++++++++++ |
1 file changed, 10 insertions(+) |
--- a/drivers/mtd/mtdcore.c |
+++ b/drivers/mtd/mtdcore.c |
@@ -40,6 +40,7 @@ |
#include <linux/slab.h> |
#include <linux/reboot.h> |
#include <linux/leds.h> |
+#include <linux/root_dev.h> |
#include <linux/mtd/mtd.h> |
#include <linux/mtd/partitions.h> |
@@ -570,6 +571,15 @@ int add_mtd_device(struct mtd_info *mtd) |
of this try_ nonsense, and no bitching about it |
either. :) */ |
__module_get(THIS_MODULE); |
+ |
+ if (!strcmp(mtd->name, "rootfs") && |
+ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) && |
+ ROOT_DEV == 0) { |
+ pr_notice("mtd: device %d (%s) set to be root filesystem\n", |
+ mtd->index, mtd->name); |
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); |
+ } |
+ |
return 0; |
fail_added: |
/branches/18.06.1/target/linux/generic/pending-4.9/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch |
---|
@@ -0,0 +1,97 @@ |
From: Daniel Golle <daniel@makrotopia.org> |
Subject: ubi: auto-attach mtd device named "ubi" or "data" on boot |
Signed-off-by: Daniel Golle <daniel@makrotopia.org> |
--- |
drivers/mtd/ubi/build.c | 36 ++++++++++++++++++++++++++++++++++++ |
1 file changed, 36 insertions(+) |
--- a/drivers/mtd/ubi/build.c |
+++ b/drivers/mtd/ubi/build.c |
@@ -1226,6 +1226,73 @@ static struct mtd_info * __init open_mtd |
return mtd; |
} |
+/* |
+ * This function tries attaching mtd partitions named either "ubi" or "data" |
+ * during boot. |
+ */ |
+static void __init ubi_auto_attach(void) |
+{ |
+ int err; |
+ struct mtd_info *mtd; |
+ loff_t offset = 0; |
+ size_t len; |
+ char magic[4]; |
+ |
+ /* try attaching mtd device named "ubi" or "data" */ |
+ mtd = open_mtd_device("ubi"); |
+ if (IS_ERR(mtd)) |
+ mtd = open_mtd_device("data"); |
+ |
+ if (IS_ERR(mtd)) |
+ return; |
+ |
+ /* get the first not bad block */ |
+ if (mtd_can_have_bb(mtd)) |
+ while (mtd_block_isbad(mtd, offset)) { |
+ offset += mtd->erasesize; |
+ |
+ if (offset > mtd->size) { |
+ pr_err("UBI error: Failed to find a non-bad " |
+ "block on mtd%d\n", mtd->index); |
+ goto cleanup; |
+ } |
+ } |
+ |
+ /* check if the read from flash was successful */ |
+ err = mtd_read(mtd, offset, 4, &len, (void *) magic); |
+ if ((err && !mtd_is_bitflip(err)) || len != 4) { |
+ pr_err("UBI error: unable to read from mtd%d\n", mtd->index); |
+ goto cleanup; |
+ } |
+ |
+ /* check for a valid ubi magic */ |
+ if (strncmp(magic, "UBI#", 4)) { |
+ pr_err("UBI error: no valid UBI magic found inside mtd%d\n", mtd->index); |
+ goto cleanup; |
+ } |
+ |
+ /* don't auto-add media types where UBI doesn't make sense */ |
+ if (mtd->type != MTD_NANDFLASH && |
+ mtd->type != MTD_NORFLASH && |
+ mtd->type != MTD_DATAFLASH && |
+ mtd->type != MTD_MLCNANDFLASH) |
+ goto cleanup; |
+ |
+ mutex_lock(&ubi_devices_mutex); |
+ pr_notice("UBI: auto-attach mtd%d\n", mtd->index); |
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0); |
+ mutex_unlock(&ubi_devices_mutex); |
+ if (err < 0) { |
+ pr_err("UBI error: cannot attach mtd%d\n", mtd->index); |
+ goto cleanup; |
+ } |
+ |
+ return; |
+ |
+cleanup: |
+ put_mtd_device(mtd); |
+} |
+ |
static int __init ubi_init(void) |
{ |
int err, i, k; |
@@ -1309,6 +1376,12 @@ static int __init ubi_init(void) |
} |
} |
+ /* auto-attach mtd devices only if built-in to the kernel and no ubi.mtd |
+ * parameter was given */ |
+ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) && |
+ !ubi_is_module() && !mtd_devs) |
+ ubi_auto_attach(); |
+ |
err = ubiblock_init(); |
if (err) { |
pr_err("UBI error: block: cannot initialize, error %d", err); |
/branches/18.06.1/target/linux/generic/pending-4.9/491-ubi-auto-create-ubiblock-device-for-rootfs.patch |
---|
@@ -0,0 +1,66 @@ |
From: Daniel Golle <daniel@makrotopia.org> |
Subject: ubi: auto-create ubiblock device for rootfs |
Signed-off-by: Daniel Golle <daniel@makrotopia.org> |
--- |
drivers/mtd/ubi/block.c | 42 ++++++++++++++++++++++++++++++++++++++++++ |
1 file changed, 42 insertions(+) |
--- a/drivers/mtd/ubi/block.c |
+++ b/drivers/mtd/ubi/block.c |
@@ -635,6 +635,44 @@ static void __init ubiblock_create_from_ |
} |
} |
+#define UBIFS_NODE_MAGIC 0x06101831 |
+static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc) |
+{ |
+ int ret; |
+ uint32_t magic_of, magic; |
+ ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4); |
+ if (ret) |
+ return 0; |
+ magic = le32_to_cpu(magic_of); |
+ return magic == UBIFS_NODE_MAGIC; |
+} |
+ |
+static void __init ubiblock_create_auto_rootfs(void) |
+{ |
+ int ubi_num, ret, is_ubifs; |
+ struct ubi_volume_desc *desc; |
+ struct ubi_volume_info vi; |
+ |
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) { |
+ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY); |
+ if (IS_ERR(desc)) |
+ continue; |
+ |
+ ubi_get_volume_info(desc, &vi); |
+ is_ubifs = ubi_vol_is_ubifs(desc); |
+ ubi_close_volume(desc); |
+ if (is_ubifs) |
+ break; |
+ |
+ ret = ubiblock_create(&vi); |
+ if (ret) |
+ pr_err("UBI error: block: can't add '%s' volume, err=%d\n", |
+ vi.name, ret); |
+ /* always break if we get here */ |
+ break; |
+ } |
+} |
+ |
static void ubiblock_remove_all(void) |
{ |
struct ubiblock *next; |
@@ -667,6 +705,10 @@ int __init ubiblock_init(void) |
*/ |
ubiblock_create_from_param(); |
+ /* auto-attach "rootfs" volume if existing and non-ubifs */ |
+ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV)) |
+ ubiblock_create_auto_rootfs(); |
+ |
/* |
* Block devices are only created upon user requests, so we ignore |
* existing volumes. |
/branches/18.06.1/target/linux/generic/pending-4.9/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch |
---|
@@ -0,0 +1,51 @@ |
From: Daniel Golle <daniel@makrotopia.org> |
Subject: try auto-mounting ubi0:rootfs in init/do_mounts.c |
Signed-off-by: Daniel Golle <daniel@makrotopia.org> |
--- |
init/do_mounts.c | 26 +++++++++++++++++++++++++- |
1 file changed, 25 insertions(+), 1 deletion(-) |
--- a/init/do_mounts.c |
+++ b/init/do_mounts.c |
@@ -438,7 +438,28 @@ retry: |
out: |
put_page(page); |
} |
- |
+ |
+static int __init mount_ubi_rootfs(void) |
+{ |
+ int flags = MS_SILENT; |
+ int err, tried = 0; |
+ |
+ while (tried < 2) { |
+ err = do_mount_root("ubi0:rootfs", "ubifs", flags, \ |
+ root_mount_data); |
+ switch (err) { |
+ case -EACCES: |
+ flags |= MS_RDONLY; |
+ tried++; |
+ break; |
+ default: |
+ return err; |
+ } |
+ } |
+ |
+ return -EINVAL; |
+} |
+ |
#ifdef CONFIG_ROOT_NFS |
#define NFSROOT_TIMEOUT_MIN 5 |
@@ -532,6 +553,10 @@ void __init mount_root(void) |
change_floppy("root floppy"); |
} |
#endif |
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV |
+ if (!mount_ubi_rootfs()) |
+ return; |
+#endif |
#ifdef CONFIG_BLOCK |
{ |
int err = create_dev("/dev/root", ROOT_DEV); |
/branches/18.06.1/target/linux/generic/pending-4.9/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch |
---|
@@ -0,0 +1,34 @@ |
From: Daniel Golle <daniel@makrotopia.org> |
Subject: ubi: set ROOT_DEV to ubiblock "rootfs" if unset |
Signed-off-by: Daniel Golle <daniel@makrotopia.org> |
--- |
drivers/mtd/ubi/block.c | 10 ++++++++++ |
1 file changed, 10 insertions(+) |
--- a/drivers/mtd/ubi/block.c |
+++ b/drivers/mtd/ubi/block.c |
@@ -50,6 +50,7 @@ |
#include <linux/scatterlist.h> |
#include <linux/idr.h> |
#include <asm/div64.h> |
+#include <linux/root_dev.h> |
#include "ubi-media.h" |
#include "ubi.h" |
@@ -447,6 +448,15 @@ int ubiblock_create(struct ubi_volume_in |
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", |
dev->ubi_num, dev->vol_id, vi->name); |
mutex_unlock(&devices_mutex); |
+ |
+ if (!strcmp(vi->name, "rootfs") && |
+ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) && |
+ ROOT_DEV == 0) { |
+ pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n", |
+ dev->ubi_num, dev->vol_id, vi->name); |
+ ROOT_DEV = MKDEV(gd->major, gd->first_minor); |
+ } |
+ |
return 0; |
out_free_queue: |
/branches/18.06.1/target/linux/generic/pending-4.9/494-mtd-ubi-add-EOF-marker-support.patch |
---|
@@ -0,0 +1,60 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: mtd: add EOF marker support to the UBI layer |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/mtd/ubi/attach.c | 25 ++++++++++++++++++++++--- |
drivers/mtd/ubi/ubi.h | 1 + |
2 files changed, 23 insertions(+), 3 deletions(-) |
--- a/drivers/mtd/ubi/attach.c |
+++ b/drivers/mtd/ubi/attach.c |
@@ -939,6 +939,13 @@ static bool vol_ignored(int vol_id) |
#endif |
} |
+static bool ec_hdr_has_eof(struct ubi_ec_hdr *ech) |
+{ |
+ return ech->padding1[0] == 'E' && |
+ ech->padding1[1] == 'O' && |
+ ech->padding1[2] == 'F'; |
+} |
+ |
/** |
* scan_peb - scan and process UBI headers of a PEB. |
* @ubi: UBI device description object |
@@ -971,9 +978,21 @@ static int scan_peb(struct ubi_device *u |
return 0; |
} |
- err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); |
- if (err < 0) |
- return err; |
+ if (!ai->eof_found) { |
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); |
+ if (err < 0) |
+ return err; |
+ |
+ if (ec_hdr_has_eof(ech)) { |
+ pr_notice("UBI: EOF marker found, PEBs from %d will be erased\n", |
+ pnum); |
+ ai->eof_found = true; |
+ } |
+ } |
+ |
+ if (ai->eof_found) |
+ err = UBI_IO_FF_BITFLIPS; |
+ |
switch (err) { |
case 0: |
break; |
--- a/drivers/mtd/ubi/ubi.h |
+++ b/drivers/mtd/ubi/ubi.h |
@@ -779,6 +779,7 @@ struct ubi_attach_info { |
int mean_ec; |
uint64_t ec_sum; |
int ec_count; |
+ bool eof_found; |
struct kmem_cache *aeb_slab_cache; |
struct ubi_ec_hdr *ech; |
struct ubi_vid_io_buf *vidb; |
/branches/18.06.1/target/linux/generic/pending-4.9/530-jffs2_make_lzma_available.patch |
---|
@@ -0,0 +1,5180 @@ |
From: Alexandros C. Couloumbis <alex@ozo.com> |
Subject: fs: add jffs2/lzma support (not activated by default yet) |
lede-commit: c2c88d315fa0e881f8b19da07b62859b915b11b2 |
Signed-off-by: Alexandros C. Couloumbis <alex@ozo.com> |
--- |
fs/jffs2/Kconfig | 9 + |
fs/jffs2/Makefile | 3 + |
fs/jffs2/compr.c | 6 + |
fs/jffs2/compr.h | 10 +- |
fs/jffs2/compr_lzma.c | 128 +++ |
fs/jffs2/super.c | 33 +- |
include/linux/lzma.h | 62 ++ |
include/linux/lzma/LzFind.h | 115 +++ |
include/linux/lzma/LzHash.h | 54 + |
include/linux/lzma/LzmaDec.h | 231 +++++ |
include/linux/lzma/LzmaEnc.h | 80 ++ |
include/linux/lzma/Types.h | 226 +++++ |
include/uapi/linux/jffs2.h | 1 + |
lib/Kconfig | 6 + |
lib/Makefile | 12 + |
lib/lzma/LzFind.c | 761 ++++++++++++++ |
lib/lzma/LzmaDec.c | 999 +++++++++++++++++++ |
lib/lzma/LzmaEnc.c | 2271 ++++++++++++++++++++++++++++++++++++++++++ |
lib/lzma/Makefile | 7 + |
19 files changed, 5008 insertions(+), 6 deletions(-) |
create mode 100644 fs/jffs2/compr_lzma.c |
create mode 100644 include/linux/lzma.h |
create mode 100644 include/linux/lzma/LzFind.h |
create mode 100644 include/linux/lzma/LzHash.h |
create mode 100644 include/linux/lzma/LzmaDec.h |
create mode 100644 include/linux/lzma/LzmaEnc.h |
create mode 100644 include/linux/lzma/Types.h |
create mode 100644 lib/lzma/LzFind.c |
create mode 100644 lib/lzma/LzmaDec.c |
create mode 100644 lib/lzma/LzmaEnc.c |
create mode 100644 lib/lzma/Makefile |
--- a/fs/jffs2/Kconfig |
+++ b/fs/jffs2/Kconfig |
@@ -139,6 +139,15 @@ config JFFS2_LZO |
This feature was added in July, 2007. Say 'N' if you need |
compatibility with older bootloaders or kernels. |
+config JFFS2_LZMA |
+ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS |
+ select LZMA_COMPRESS |
+ select LZMA_DECOMPRESS |
+ depends on JFFS2_FS |
+ default n |
+ help |
+ JFFS2 wrapper to the LZMA C SDK |
+ |
config JFFS2_RTIME |
bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS |
depends on JFFS2_FS |
--- a/fs/jffs2/Makefile |
+++ b/fs/jffs2/Makefile |
@@ -18,4 +18,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub |
jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o |
jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o |
jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o |
+jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o |
jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o |
+ |
+CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma |
--- a/fs/jffs2/compr.c |
+++ b/fs/jffs2/compr.c |
@@ -378,6 +378,9 @@ int __init jffs2_compressors_init(void) |
#ifdef CONFIG_JFFS2_LZO |
jffs2_lzo_init(); |
#endif |
+#ifdef CONFIG_JFFS2_LZMA |
+ jffs2_lzma_init(); |
+#endif |
/* Setting default compression mode */ |
#ifdef CONFIG_JFFS2_CMODE_NONE |
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; |
@@ -401,6 +404,9 @@ int __init jffs2_compressors_init(void) |
int jffs2_compressors_exit(void) |
{ |
/* Unregistering compressors */ |
+#ifdef CONFIG_JFFS2_LZMA |
+ jffs2_lzma_exit(); |
+#endif |
#ifdef CONFIG_JFFS2_LZO |
jffs2_lzo_exit(); |
#endif |
--- a/fs/jffs2/compr.h |
+++ b/fs/jffs2/compr.h |
@@ -29,9 +29,9 @@ |
#define JFFS2_DYNRUBIN_PRIORITY 20 |
#define JFFS2_LZARI_PRIORITY 30 |
#define JFFS2_RTIME_PRIORITY 50 |
-#define JFFS2_ZLIB_PRIORITY 60 |
-#define JFFS2_LZO_PRIORITY 80 |
- |
+#define JFFS2_LZMA_PRIORITY 70 |
+#define JFFS2_ZLIB_PRIORITY 80 |
+#define JFFS2_LZO_PRIORITY 90 |
#define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */ |
#define JFFS2_DYNRUBIN_DISABLED /* for decompression */ |
@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void); |
int jffs2_lzo_init(void); |
void jffs2_lzo_exit(void); |
#endif |
+#ifdef CONFIG_JFFS2_LZMA |
+int jffs2_lzma_init(void); |
+void jffs2_lzma_exit(void); |
+#endif |
#endif /* __JFFS2_COMPR_H__ */ |
--- /dev/null |
+++ b/fs/jffs2/compr_lzma.c |
@@ -0,0 +1,128 @@ |
+/* |
+ * JFFS2 -- Journalling Flash File System, Version 2. |
+ * |
+ * For licensing information, see the file 'LICENCE' in this directory. |
+ * |
+ * JFFS2 wrapper to the LZMA C SDK |
+ * |
+ */ |
+ |
+#include <linux/lzma.h> |
+#include "compr.h" |
+ |
+#ifdef __KERNEL__ |
+ static DEFINE_MUTEX(deflate_mutex); |
+#endif |
+ |
+CLzmaEncHandle *p; |
+Byte propsEncoded[LZMA_PROPS_SIZE]; |
+SizeT propsSize = sizeof(propsEncoded); |
+ |
+STATIC void lzma_free_workspace(void) |
+{ |
+ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc); |
+} |
+ |
+STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props) |
+{ |
+ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL) |
+ { |
+ PRINT_ERROR("Failed to allocate lzma deflate workspace\n"); |
+ return -ENOMEM; |
+ } |
+ |
+ if (LzmaEnc_SetProps(p, props) != SZ_OK) |
+ { |
+ lzma_free_workspace(); |
+ return -1; |
+ } |
+ |
+ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK) |
+ { |
+ lzma_free_workspace(); |
+ return -1; |
+ } |
+ |
+ return 0; |
+} |
+ |
+STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out, |
+ uint32_t *sourcelen, uint32_t *dstlen) |
+{ |
+ SizeT compress_size = (SizeT)(*dstlen); |
+ int ret; |
+ |
+ #ifdef __KERNEL__ |
+ mutex_lock(&deflate_mutex); |
+ #endif |
+ |
+ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen, |
+ 0, NULL, &lzma_alloc, &lzma_alloc); |
+ |
+ #ifdef __KERNEL__ |
+ mutex_unlock(&deflate_mutex); |
+ #endif |
+ |
+ if (ret != SZ_OK) |
+ return -1; |
+ |
+ *dstlen = (uint32_t)compress_size; |
+ |
+ return 0; |
+} |
+ |
+STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out, |
+ uint32_t srclen, uint32_t destlen) |
+{ |
+ int ret; |
+ SizeT dl = (SizeT)destlen; |
+ SizeT sl = (SizeT)srclen; |
+ ELzmaStatus status; |
+ |
+ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded, |
+ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc); |
+ |
+ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen) |
+ return -1; |
+ |
+ return 0; |
+} |
+ |
+static struct jffs2_compressor jffs2_lzma_comp = { |
+ .priority = JFFS2_LZMA_PRIORITY, |
+ .name = "lzma", |
+ .compr = JFFS2_COMPR_LZMA, |
+ .compress = &jffs2_lzma_compress, |
+ .decompress = &jffs2_lzma_decompress, |
+ .disabled = 0, |
+}; |
+ |
+int INIT jffs2_lzma_init(void) |
+{ |
+ int ret; |
+ CLzmaEncProps props; |
+ LzmaEncProps_Init(&props); |
+ |
+ props.dictSize = LZMA_BEST_DICT(0x2000); |
+ props.level = LZMA_BEST_LEVEL; |
+ props.lc = LZMA_BEST_LC; |
+ props.lp = LZMA_BEST_LP; |
+ props.pb = LZMA_BEST_PB; |
+ props.fb = LZMA_BEST_FB; |
+ |
+ ret = lzma_alloc_workspace(&props); |
+ if (ret < 0) |
+ return ret; |
+ |
+ ret = jffs2_register_compressor(&jffs2_lzma_comp); |
+ if (ret) |
+ lzma_free_workspace(); |
+ |
+ return ret; |
+} |
+ |
+void jffs2_lzma_exit(void) |
+{ |
+ jffs2_unregister_compressor(&jffs2_lzma_comp); |
+ lzma_free_workspace(); |
+} |
--- a/fs/jffs2/super.c |
+++ b/fs/jffs2/super.c |
@@ -371,14 +371,41 @@ static int __init init_jffs2_fs(void) |
BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68); |
BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32); |
- pr_info("version 2.2." |
+ pr_info("version 2.2" |
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
" (NAND)" |
#endif |
#ifdef CONFIG_JFFS2_SUMMARY |
- " (SUMMARY) " |
+ " (SUMMARY)" |
#endif |
- " © 2001-2006 Red Hat, Inc.\n"); |
+#ifdef CONFIG_JFFS2_ZLIB |
+ " (ZLIB)" |
+#endif |
+#ifdef CONFIG_JFFS2_LZO |
+ " (LZO)" |
+#endif |
+#ifdef CONFIG_JFFS2_LZMA |
+ " (LZMA)" |
+#endif |
+#ifdef CONFIG_JFFS2_RTIME |
+ " (RTIME)" |
+#endif |
+#ifdef CONFIG_JFFS2_RUBIN |
+ " (RUBIN)" |
+#endif |
+#ifdef CONFIG_JFFS2_CMODE_NONE |
+ " (CMODE_NONE)" |
+#endif |
+#ifdef CONFIG_JFFS2_CMODE_PRIORITY |
+ " (CMODE_PRIORITY)" |
+#endif |
+#ifdef CONFIG_JFFS2_CMODE_SIZE |
+ " (CMODE_SIZE)" |
+#endif |
+#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO |
+ " (CMODE_FAVOURLZO)" |
+#endif |
+ " (c) 2001-2006 Red Hat, Inc.\n"); |
jffs2_inode_cachep = kmem_cache_create("jffs2_i", |
sizeof(struct jffs2_inode_info), |
--- /dev/null |
+++ b/include/linux/lzma.h |
@@ -0,0 +1,62 @@ |
+#ifndef __LZMA_H__ |
+#define __LZMA_H__ |
+ |
+#ifdef __KERNEL__ |
+ #include <linux/kernel.h> |
+ #include <linux/sched.h> |
+ #include <linux/slab.h> |
+ #include <linux/vmalloc.h> |
+ #include <linux/init.h> |
+ #define LZMA_MALLOC vmalloc |
+ #define LZMA_FREE vfree |
+ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg) |
+ #define INIT __init |
+ #define STATIC static |
+#else |
+ #include <stdint.h> |
+ #include <stdlib.h> |
+ #include <stdio.h> |
+ #include <unistd.h> |
+ #include <string.h> |
+ #include <asm/types.h> |
+ #include <errno.h> |
+ #include <linux/jffs2.h> |
+ #ifndef PAGE_SIZE |
+ extern int page_size; |
+ #define PAGE_SIZE page_size |
+ #endif |
+ #define LZMA_MALLOC malloc |
+ #define LZMA_FREE free |
+ #define PRINT_ERROR(msg) fprintf(stderr, msg) |
+ #define INIT |
+ #define STATIC |
+#endif |
+ |
+#include "lzma/LzmaDec.h" |
+#include "lzma/LzmaEnc.h" |
+ |
+#define LZMA_BEST_LEVEL (9) |
+#define LZMA_BEST_LC (0) |
+#define LZMA_BEST_LP (0) |
+#define LZMA_BEST_PB (0) |
+#define LZMA_BEST_FB (273) |
+ |
+#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2) |
+ |
+static void *p_lzma_malloc(void *p, size_t size) |
+{ |
+ if (size == 0) |
+ return NULL; |
+ |
+ return LZMA_MALLOC(size); |
+} |
+ |
+static void p_lzma_free(void *p, void *address) |
+{ |
+ if (address != NULL) |
+ LZMA_FREE(address); |
+} |
+ |
+static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free}; |
+ |
+#endif |
--- /dev/null |
+++ b/include/linux/lzma/LzFind.h |
@@ -0,0 +1,115 @@ |
+/* LzFind.h -- Match finder for LZ algorithms |
+2009-04-22 : Igor Pavlov : Public domain */ |
+ |
+#ifndef __LZ_FIND_H |
+#define __LZ_FIND_H |
+ |
+#include "Types.h" |
+ |
+#ifdef __cplusplus |
+extern "C" { |
+#endif |
+ |
+typedef UInt32 CLzRef; |
+ |
+typedef struct _CMatchFinder |
+{ |
+ Byte *buffer; |
+ UInt32 pos; |
+ UInt32 posLimit; |
+ UInt32 streamPos; |
+ UInt32 lenLimit; |
+ |
+ UInt32 cyclicBufferPos; |
+ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */ |
+ |
+ UInt32 matchMaxLen; |
+ CLzRef *hash; |
+ CLzRef *son; |
+ UInt32 hashMask; |
+ UInt32 cutValue; |
+ |
+ Byte *bufferBase; |
+ ISeqInStream *stream; |
+ int streamEndWasReached; |
+ |
+ UInt32 blockSize; |
+ UInt32 keepSizeBefore; |
+ UInt32 keepSizeAfter; |
+ |
+ UInt32 numHashBytes; |
+ int directInput; |
+ size_t directInputRem; |
+ int btMode; |
+ int bigHash; |
+ UInt32 historySize; |
+ UInt32 fixedHashSize; |
+ UInt32 hashSizeSum; |
+ UInt32 numSons; |
+ SRes result; |
+ UInt32 crc[256]; |
+} CMatchFinder; |
+ |
+#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer) |
+#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)]) |
+ |
+#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos) |
+ |
+int MatchFinder_NeedMove(CMatchFinder *p); |
+Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); |
+void MatchFinder_MoveBlock(CMatchFinder *p); |
+void MatchFinder_ReadIfRequired(CMatchFinder *p); |
+ |
+void MatchFinder_Construct(CMatchFinder *p); |
+ |
+/* Conditions: |
+ historySize <= 3 GB |
+ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB |
+*/ |
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, |
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, |
+ ISzAlloc *alloc); |
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc); |
+void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems); |
+void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue); |
+ |
+UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son, |
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue, |
+ UInt32 *distances, UInt32 maxLen); |
+ |
+/* |
+Conditions: |
+ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func. |
+ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function |
+*/ |
+ |
+typedef void (*Mf_Init_Func)(void *object); |
+typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index); |
+typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object); |
+typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object); |
+typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances); |
+typedef void (*Mf_Skip_Func)(void *object, UInt32); |
+ |
+typedef struct _IMatchFinder |
+{ |
+ Mf_Init_Func Init; |
+ Mf_GetIndexByte_Func GetIndexByte; |
+ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes; |
+ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos; |
+ Mf_GetMatches_Func GetMatches; |
+ Mf_Skip_Func Skip; |
+} IMatchFinder; |
+ |
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable); |
+ |
+void MatchFinder_Init(CMatchFinder *p); |
+UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); |
+UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); |
+void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); |
+void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); |
+ |
+#ifdef __cplusplus |
+} |
+#endif |
+ |
+#endif |
--- /dev/null |
+++ b/include/linux/lzma/LzHash.h |
@@ -0,0 +1,54 @@ |
+/* LzHash.h -- HASH functions for LZ algorithms |
+2009-02-07 : Igor Pavlov : Public domain */ |
+ |
+#ifndef __LZ_HASH_H |
+#define __LZ_HASH_H |
+ |
+#define kHash2Size (1 << 10) |
+#define kHash3Size (1 << 16) |
+#define kHash4Size (1 << 20) |
+ |
+#define kFix3HashSize (kHash2Size) |
+#define kFix4HashSize (kHash2Size + kHash3Size) |
+#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size) |
+ |
+#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8); |
+ |
+#define HASH3_CALC { \ |
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ |
+ hash2Value = temp & (kHash2Size - 1); \ |
+ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; } |
+ |
+#define HASH4_CALC { \ |
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ |
+ hash2Value = temp & (kHash2Size - 1); \ |
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ |
+ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; } |
+ |
+#define HASH5_CALC { \ |
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ |
+ hash2Value = temp & (kHash2Size - 1); \ |
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ |
+ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \ |
+ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \ |
+ hash4Value &= (kHash4Size - 1); } |
+ |
+/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */ |
+#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF; |
+ |
+ |
+#define MT_HASH2_CALC \ |
+ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1); |
+ |
+#define MT_HASH3_CALC { \ |
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ |
+ hash2Value = temp & (kHash2Size - 1); \ |
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); } |
+ |
+#define MT_HASH4_CALC { \ |
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ |
+ hash2Value = temp & (kHash2Size - 1); \ |
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ |
+ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); } |
+ |
+#endif |
--- /dev/null |
+++ b/include/linux/lzma/LzmaDec.h |
@@ -0,0 +1,231 @@ |
+/* LzmaDec.h -- LZMA Decoder |
+2009-02-07 : Igor Pavlov : Public domain */ |
+ |
+#ifndef __LZMA_DEC_H |
+#define __LZMA_DEC_H |
+ |
+#include "Types.h" |
+ |
+#ifdef __cplusplus |
+extern "C" { |
+#endif |
+ |
+/* #define _LZMA_PROB32 */ |
+/* _LZMA_PROB32 can increase the speed on some CPUs, |
+ but memory usage for CLzmaDec::probs will be doubled in that case */ |
+ |
+#ifdef _LZMA_PROB32 |
+#define CLzmaProb UInt32 |
+#else |
+#define CLzmaProb UInt16 |
+#endif |
+ |
+ |
+/* ---------- LZMA Properties ---------- */ |
+ |
+#define LZMA_PROPS_SIZE 5 |
+ |
+typedef struct _CLzmaProps |
+{ |
+ unsigned lc, lp, pb; |
+ UInt32 dicSize; |
+} CLzmaProps; |
+ |
+/* LzmaProps_Decode - decodes properties |
+Returns: |
+ SZ_OK |
+ SZ_ERROR_UNSUPPORTED - Unsupported properties |
+*/ |
+ |
+SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size); |
+ |
+ |
+/* ---------- LZMA Decoder state ---------- */ |
+ |
+/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case. |
+ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */ |
+ |
+#define LZMA_REQUIRED_INPUT_MAX 20 |
+ |
+typedef struct |
+{ |
+ CLzmaProps prop; |
+ CLzmaProb *probs; |
+ Byte *dic; |
+ const Byte *buf; |
+ UInt32 range, code; |
+ SizeT dicPos; |
+ SizeT dicBufSize; |
+ UInt32 processedPos; |
+ UInt32 checkDicSize; |
+ unsigned state; |
+ UInt32 reps[4]; |
+ unsigned remainLen; |
+ int needFlush; |
+ int needInitState; |
+ UInt32 numProbs; |
+ unsigned tempBufSize; |
+ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX]; |
+} CLzmaDec; |
+ |
+#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; } |
+ |
+void LzmaDec_Init(CLzmaDec *p); |
+ |
+/* There are two types of LZMA streams: |
+ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size. |
+ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */ |
+ |
+typedef enum |
+{ |
+ LZMA_FINISH_ANY, /* finish at any point */ |
+ LZMA_FINISH_END /* block must be finished at the end */ |
+} ELzmaFinishMode; |
+ |
+/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!! |
+ |
+ You must use LZMA_FINISH_END, when you know that current output buffer |
+ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY. |
+ |
+ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK, |
+ and output value of destLen will be less than output buffer size limit. |
+ You can check status result also. |
+ |
+ You can use multiple checks to test data integrity after full decompression: |
+ 1) Check Result and "status" variable. |
+ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize. |
+ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize. |
+ You must use correct finish mode in that case. */ |
+ |
+typedef enum |
+{ |
+ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */ |
+ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */ |
+ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */ |
+ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */ |
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */ |
+} ELzmaStatus; |
+ |
+/* ELzmaStatus is used only as output value for function call */ |
+ |
+ |
+/* ---------- Interfaces ---------- */ |
+ |
+/* There are 3 levels of interfaces: |
+ 1) Dictionary Interface |
+ 2) Buffer Interface |
+ 3) One Call Interface |
+ You can select any of these interfaces, but don't mix functions from different |
+ groups for same object. */ |
+ |
+ |
+/* There are two variants to allocate state for Dictionary Interface: |
+ 1) LzmaDec_Allocate / LzmaDec_Free |
+ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs |
+ You can use variant 2, if you set dictionary buffer manually. |
+ For Buffer Interface you must always use variant 1. |
+ |
+LzmaDec_Allocate* can return: |
+ SZ_OK |
+ SZ_ERROR_MEM - Memory allocation error |
+ SZ_ERROR_UNSUPPORTED - Unsupported properties |
+*/ |
+ |
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc); |
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc); |
+ |
+SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc); |
+void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc); |
+ |
+/* ---------- Dictionary Interface ---------- */ |
+ |
+/* You can use it, if you want to eliminate the overhead for data copying from |
+ dictionary to some other external buffer. |
+ You must work with CLzmaDec variables directly in this interface. |
+ |
+ STEPS: |
+ LzmaDec_Constr() |
+ LzmaDec_Allocate() |
+ for (each new stream) |
+ { |
+ LzmaDec_Init() |
+ while (it needs more decompression) |
+ { |
+ LzmaDec_DecodeToDic() |
+ use data from CLzmaDec::dic and update CLzmaDec::dicPos |
+ } |
+ } |
+ LzmaDec_Free() |
+*/ |
+ |
+/* LzmaDec_DecodeToDic |
+ |
+ The decoding to internal dictionary buffer (CLzmaDec::dic). |
+ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!! |
+ |
+finishMode: |
+ It has meaning only if the decoding reaches output limit (dicLimit). |
+ LZMA_FINISH_ANY - Decode just dicLimit bytes. |
+ LZMA_FINISH_END - Stream must be finished after dicLimit. |
+ |
+Returns: |
+ SZ_OK |
+ status: |
+ LZMA_STATUS_FINISHED_WITH_MARK |
+ LZMA_STATUS_NOT_FINISHED |
+ LZMA_STATUS_NEEDS_MORE_INPUT |
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK |
+ SZ_ERROR_DATA - Data error |
+*/ |
+ |
+SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, |
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); |
+ |
+ |
+/* ---------- Buffer Interface ---------- */ |
+ |
+/* It's zlib-like interface. |
+ See LzmaDec_DecodeToDic description for information about STEPS and return results, |
+ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need |
+ to work with CLzmaDec variables manually. |
+ |
+finishMode: |
+ It has meaning only if the decoding reaches output limit (*destLen). |
+ LZMA_FINISH_ANY - Decode just destLen bytes. |
+ LZMA_FINISH_END - Stream must be finished after (*destLen). |
+*/ |
+ |
+SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, |
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); |
+ |
+ |
+/* ---------- One Call Interface ---------- */ |
+ |
+/* LzmaDecode |
+ |
+finishMode: |
+ It has meaning only if the decoding reaches output limit (*destLen). |
+ LZMA_FINISH_ANY - Decode just destLen bytes. |
+ LZMA_FINISH_END - Stream must be finished after (*destLen). |
+ |
+Returns: |
+ SZ_OK |
+ status: |
+ LZMA_STATUS_FINISHED_WITH_MARK |
+ LZMA_STATUS_NOT_FINISHED |
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK |
+ SZ_ERROR_DATA - Data error |
+ SZ_ERROR_MEM - Memory allocation error |
+ SZ_ERROR_UNSUPPORTED - Unsupported properties |
+ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src). |
+*/ |
+ |
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, |
+ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, |
+ ELzmaStatus *status, ISzAlloc *alloc); |
+ |
+#ifdef __cplusplus |
+} |
+#endif |
+ |
+#endif |
--- /dev/null |
+++ b/include/linux/lzma/LzmaEnc.h |
@@ -0,0 +1,80 @@ |
+/* LzmaEnc.h -- LZMA Encoder |
+2009-02-07 : Igor Pavlov : Public domain */ |
+ |
+#ifndef __LZMA_ENC_H |
+#define __LZMA_ENC_H |
+ |
+#include "Types.h" |
+ |
+#ifdef __cplusplus |
+extern "C" { |
+#endif |
+ |
+#define LZMA_PROPS_SIZE 5 |
+ |
+typedef struct _CLzmaEncProps |
+{ |
+ int level; /* 0 <= level <= 9 */ |
+ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version |
+ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version |
+ default = (1 << 24) */ |
+ int lc; /* 0 <= lc <= 8, default = 3 */ |
+ int lp; /* 0 <= lp <= 4, default = 0 */ |
+ int pb; /* 0 <= pb <= 4, default = 2 */ |
+ int algo; /* 0 - fast, 1 - normal, default = 1 */ |
+ int fb; /* 5 <= fb <= 273, default = 32 */ |
+ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */ |
+ int numHashBytes; /* 2, 3 or 4, default = 4 */ |
+ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */ |
+ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */ |
+ int numThreads; /* 1 or 2, default = 2 */ |
+} CLzmaEncProps; |
+ |
+void LzmaEncProps_Init(CLzmaEncProps *p); |
+void LzmaEncProps_Normalize(CLzmaEncProps *p); |
+UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2); |
+ |
+ |
+/* ---------- CLzmaEncHandle Interface ---------- */ |
+ |
+/* LzmaEnc_* functions can return the following exit codes: |
+Returns: |
+ SZ_OK - OK |
+ SZ_ERROR_MEM - Memory allocation error |
+ SZ_ERROR_PARAM - Incorrect parameter in props |
+ SZ_ERROR_WRITE - Write callback error. |
+ SZ_ERROR_PROGRESS - some break from progress callback |
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) |
+*/ |
+ |
+typedef void * CLzmaEncHandle; |
+ |
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc); |
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig); |
+SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props); |
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size); |
+SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream, |
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); |
+SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, |
+ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); |
+ |
+/* ---------- One Call Interface ---------- */ |
+ |
+/* LzmaEncode |
+Return code: |
+ SZ_OK - OK |
+ SZ_ERROR_MEM - Memory allocation error |
+ SZ_ERROR_PARAM - Incorrect parameter |
+ SZ_ERROR_OUTPUT_EOF - output buffer overflow |
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) |
+*/ |
+ |
+SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, |
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, |
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); |
+ |
+#ifdef __cplusplus |
+} |
+#endif |
+ |
+#endif |
--- /dev/null |
+++ b/include/linux/lzma/Types.h |
@@ -0,0 +1,226 @@ |
+/* Types.h -- Basic types |
+2009-11-23 : Igor Pavlov : Public domain */ |
+ |
+#ifndef __7Z_TYPES_H |
+#define __7Z_TYPES_H |
+ |
+#include <stddef.h> |
+ |
+#ifdef _WIN32 |
+#include <windows.h> |
+#endif |
+ |
+#ifndef EXTERN_C_BEGIN |
+#ifdef __cplusplus |
+#define EXTERN_C_BEGIN extern "C" { |
+#define EXTERN_C_END } |
+#else |
+#define EXTERN_C_BEGIN |
+#define EXTERN_C_END |
+#endif |
+#endif |
+ |
+EXTERN_C_BEGIN |
+ |
+#define SZ_OK 0 |
+ |
+#define SZ_ERROR_DATA 1 |
+#define SZ_ERROR_MEM 2 |
+#define SZ_ERROR_CRC 3 |
+#define SZ_ERROR_UNSUPPORTED 4 |
+#define SZ_ERROR_PARAM 5 |
+#define SZ_ERROR_INPUT_EOF 6 |
+#define SZ_ERROR_OUTPUT_EOF 7 |
+#define SZ_ERROR_READ 8 |
+#define SZ_ERROR_WRITE 9 |
+#define SZ_ERROR_PROGRESS 10 |
+#define SZ_ERROR_FAIL 11 |
+#define SZ_ERROR_THREAD 12 |
+ |
+#define SZ_ERROR_ARCHIVE 16 |
+#define SZ_ERROR_NO_ARCHIVE 17 |
+ |
+typedef int SRes; |
+ |
+#ifdef _WIN32 |
+typedef DWORD WRes; |
+#else |
+typedef int WRes; |
+#endif |
+ |
+#ifndef RINOK |
+#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; } |
+#endif |
+ |
+typedef unsigned char Byte; |
+typedef short Int16; |
+typedef unsigned short UInt16; |
+ |
+#ifdef _LZMA_UINT32_IS_ULONG |
+typedef long Int32; |
+typedef unsigned long UInt32; |
+#else |
+typedef int Int32; |
+typedef unsigned int UInt32; |
+#endif |
+ |
+#ifdef _SZ_NO_INT_64 |
+ |
+/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers. |
+ NOTES: Some code will work incorrectly in that case! */ |
+ |
+typedef long Int64; |
+typedef unsigned long UInt64; |
+ |
+#else |
+ |
+#if defined(_MSC_VER) || defined(__BORLANDC__) |
+typedef __int64 Int64; |
+typedef unsigned __int64 UInt64; |
+#else |
+typedef long long int Int64; |
+typedef unsigned long long int UInt64; |
+#endif |
+ |
+#endif |
+ |
+#ifdef _LZMA_NO_SYSTEM_SIZE_T |
+typedef UInt32 SizeT; |
+#else |
+typedef size_t SizeT; |
+#endif |
+ |
+typedef int Bool; |
+#define True 1 |
+#define False 0 |
+ |
+ |
+#ifdef _WIN32 |
+#define MY_STD_CALL __stdcall |
+#else |
+#define MY_STD_CALL |
+#endif |
+ |
+#ifdef _MSC_VER |
+ |
+#if _MSC_VER >= 1300 |
+#define MY_NO_INLINE __declspec(noinline) |
+#else |
+#define MY_NO_INLINE |
+#endif |
+ |
+#define MY_CDECL __cdecl |
+#define MY_FAST_CALL __fastcall |
+ |
+#else |
+ |
+#define MY_CDECL |
+#define MY_FAST_CALL |
+ |
+#endif |
+ |
+ |
+/* The following interfaces use first parameter as pointer to structure */ |
+ |
+typedef struct |
+{ |
+ SRes (*Read)(void *p, void *buf, size_t *size); |
+ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. |
+ (output(*size) < input(*size)) is allowed */ |
+} ISeqInStream; |
+ |
+/* it can return SZ_ERROR_INPUT_EOF */ |
+SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size); |
+SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType); |
+SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf); |
+ |
+typedef struct |
+{ |
+ size_t (*Write)(void *p, const void *buf, size_t size); |
+ /* Returns: result - the number of actually written bytes. |
+ (result < size) means error */ |
+} ISeqOutStream; |
+ |
+typedef enum |
+{ |
+ SZ_SEEK_SET = 0, |
+ SZ_SEEK_CUR = 1, |
+ SZ_SEEK_END = 2 |
+} ESzSeek; |
+ |
+typedef struct |
+{ |
+ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */ |
+ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); |
+} ISeekInStream; |
+ |
+typedef struct |
+{ |
+ SRes (*Look)(void *p, void **buf, size_t *size); |
+ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. |
+ (output(*size) > input(*size)) is not allowed |
+ (output(*size) < input(*size)) is allowed */ |
+ SRes (*Skip)(void *p, size_t offset); |
+ /* offset must be <= output(*size) of Look */ |
+ |
+ SRes (*Read)(void *p, void *buf, size_t *size); |
+ /* reads directly (without buffer). It's same as ISeqInStream::Read */ |
+ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); |
+} ILookInStream; |
+ |
+SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size); |
+SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset); |
+ |
+/* reads via ILookInStream::Read */ |
+SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType); |
+SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size); |
+ |
+#define LookToRead_BUF_SIZE (1 << 14) |
+ |
+typedef struct |
+{ |
+ ILookInStream s; |
+ ISeekInStream *realStream; |
+ size_t pos; |
+ size_t size; |
+ Byte buf[LookToRead_BUF_SIZE]; |
+} CLookToRead; |
+ |
+void LookToRead_CreateVTable(CLookToRead *p, int lookahead); |
+void LookToRead_Init(CLookToRead *p); |
+ |
+typedef struct |
+{ |
+ ISeqInStream s; |
+ ILookInStream *realStream; |
+} CSecToLook; |
+ |
+void SecToLook_CreateVTable(CSecToLook *p); |
+ |
+typedef struct |
+{ |
+ ISeqInStream s; |
+ ILookInStream *realStream; |
+} CSecToRead; |
+ |
+void SecToRead_CreateVTable(CSecToRead *p); |
+ |
+typedef struct |
+{ |
+ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize); |
+ /* Returns: result. (result != SZ_OK) means break. |
+ Value (UInt64)(Int64)-1 for size means unknown value. */ |
+} ICompressProgress; |
+ |
+typedef struct |
+{ |
+ void *(*Alloc)(void *p, size_t size); |
+ void (*Free)(void *p, void *address); /* address can be 0 */ |
+} ISzAlloc; |
+ |
+#define IAlloc_Alloc(p, size) (p)->Alloc((p), size) |
+#define IAlloc_Free(p, a) (p)->Free((p), a) |
+ |
+EXTERN_C_END |
+ |
+#endif |
--- a/include/uapi/linux/jffs2.h |
+++ b/include/uapi/linux/jffs2.h |
@@ -46,6 +46,7 @@ |
#define JFFS2_COMPR_DYNRUBIN 0x05 |
#define JFFS2_COMPR_ZLIB 0x06 |
#define JFFS2_COMPR_LZO 0x07 |
+#define JFFS2_COMPR_LZMA 0x08 |
/* Compatibility flags. */ |
#define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */ |
#define JFFS2_NODE_ACCURATE 0x2000 |
--- a/lib/Kconfig |
+++ b/lib/Kconfig |
@@ -241,6 +241,12 @@ config LZ4_DECOMPRESS |
source "lib/xz/Kconfig" |
+config LZMA_COMPRESS |
+ tristate |
+ |
+config LZMA_DECOMPRESS |
+ tristate |
+ |
# |
# These all provide a common interface (hence the apparent duplication with |
# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.) |
--- a/lib/Makefile |
+++ b/lib/Makefile |
@@ -2,6 +2,16 @@ |
# Makefile for some libs needed in the kernel. |
# |
+ifdef CONFIG_JFFS2_ZLIB |
+ CONFIG_ZLIB_INFLATE:=y |
+ CONFIG_ZLIB_DEFLATE:=y |
+endif |
+ |
+ifdef CONFIG_JFFS2_LZMA |
+ CONFIG_LZMA_DECOMPRESS:=y |
+ CONFIG_LZMA_COMPRESS:=y |
+endif |
+ |
ifdef CONFIG_FUNCTION_TRACER |
ORIG_CFLAGS := $(KBUILD_CFLAGS) |
KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) |
@@ -108,6 +118,8 @@ obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/ |
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/ |
obj-$(CONFIG_XZ_DEC) += xz/ |
obj-$(CONFIG_RAID6_PQ) += raid6/ |
+obj-$(CONFIG_LZMA_COMPRESS) += lzma/ |
+obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/ |
lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o |
lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o |
--- /dev/null |
+++ b/lib/lzma/LzFind.c |
@@ -0,0 +1,761 @@ |
+/* LzFind.c -- Match finder for LZ algorithms |
+2009-04-22 : Igor Pavlov : Public domain */ |
+ |
+#include <string.h> |
+ |
+#include "LzFind.h" |
+#include "LzHash.h" |
+ |
+#define kEmptyHashValue 0 |
+#define kMaxValForNormalize ((UInt32)0xFFFFFFFF) |
+#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */ |
+#define kNormalizeMask (~(kNormalizeStepMin - 1)) |
+#define kMaxHistorySize ((UInt32)3 << 30) |
+ |
+#define kStartMaxLen 3 |
+ |
+static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc) |
+{ |
+ if (!p->directInput) |
+ { |
+ alloc->Free(alloc, p->bufferBase); |
+ p->bufferBase = 0; |
+ } |
+} |
+ |
+/* (keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */ |
+ |
+static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc) |
+{ |
+ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv; |
+ if (p->directInput) |
+ { |
+ p->blockSize = blockSize; |
+ return 1; |
+ } |
+ if (p->bufferBase == 0 || p->blockSize != blockSize) |
+ { |
+ LzInWindow_Free(p, alloc); |
+ p->blockSize = blockSize; |
+ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize); |
+ } |
+ return (p->bufferBase != 0); |
+} |
+ |
+Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; } |
+Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; } |
+ |
+UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; } |
+ |
+void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue) |
+{ |
+ p->posLimit -= subValue; |
+ p->pos -= subValue; |
+ p->streamPos -= subValue; |
+} |
+ |
+static void MatchFinder_ReadBlock(CMatchFinder *p) |
+{ |
+ if (p->streamEndWasReached || p->result != SZ_OK) |
+ return; |
+ if (p->directInput) |
+ { |
+ UInt32 curSize = 0xFFFFFFFF - p->streamPos; |
+ if (curSize > p->directInputRem) |
+ curSize = (UInt32)p->directInputRem; |
+ p->directInputRem -= curSize; |
+ p->streamPos += curSize; |
+ if (p->directInputRem == 0) |
+ p->streamEndWasReached = 1; |
+ return; |
+ } |
+ for (;;) |
+ { |
+ Byte *dest = p->buffer + (p->streamPos - p->pos); |
+ size_t size = (p->bufferBase + p->blockSize - dest); |
+ if (size == 0) |
+ return; |
+ p->result = p->stream->Read(p->stream, dest, &size); |
+ if (p->result != SZ_OK) |
+ return; |
+ if (size == 0) |
+ { |
+ p->streamEndWasReached = 1; |
+ return; |
+ } |
+ p->streamPos += (UInt32)size; |
+ if (p->streamPos - p->pos > p->keepSizeAfter) |
+ return; |
+ } |
+} |
+ |
+void MatchFinder_MoveBlock(CMatchFinder *p) |
+{ |
+ memmove(p->bufferBase, |
+ p->buffer - p->keepSizeBefore, |
+ (size_t)(p->streamPos - p->pos + p->keepSizeBefore)); |
+ p->buffer = p->bufferBase + p->keepSizeBefore; |
+} |
+ |
+int MatchFinder_NeedMove(CMatchFinder *p) |
+{ |
+ if (p->directInput) |
+ return 0; |
+ /* if (p->streamEndWasReached) return 0; */ |
+ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter); |
+} |
+ |
+void MatchFinder_ReadIfRequired(CMatchFinder *p) |
+{ |
+ if (p->streamEndWasReached) |
+ return; |
+ if (p->keepSizeAfter >= p->streamPos - p->pos) |
+ MatchFinder_ReadBlock(p); |
+} |
+ |
+static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p) |
+{ |
+ if (MatchFinder_NeedMove(p)) |
+ MatchFinder_MoveBlock(p); |
+ MatchFinder_ReadBlock(p); |
+} |
+ |
+static void MatchFinder_SetDefaultSettings(CMatchFinder *p) |
+{ |
+ p->cutValue = 32; |
+ p->btMode = 1; |
+ p->numHashBytes = 4; |
+ p->bigHash = 0; |
+} |
+ |
+#define kCrcPoly 0xEDB88320 |
+ |
+void MatchFinder_Construct(CMatchFinder *p) |
+{ |
+ UInt32 i; |
+ p->bufferBase = 0; |
+ p->directInput = 0; |
+ p->hash = 0; |
+ MatchFinder_SetDefaultSettings(p); |
+ |
+ for (i = 0; i < 256; i++) |
+ { |
+ UInt32 r = i; |
+ int j; |
+ for (j = 0; j < 8; j++) |
+ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1)); |
+ p->crc[i] = r; |
+ } |
+} |
+ |
+static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc) |
+{ |
+ alloc->Free(alloc, p->hash); |
+ p->hash = 0; |
+} |
+ |
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc) |
+{ |
+ MatchFinder_FreeThisClassMemory(p, alloc); |
+ LzInWindow_Free(p, alloc); |
+} |
+ |
+static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc) |
+{ |
+ size_t sizeInBytes = (size_t)num * sizeof(CLzRef); |
+ if (sizeInBytes / sizeof(CLzRef) != num) |
+ return 0; |
+ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes); |
+} |
+ |
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, |
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, |
+ ISzAlloc *alloc) |
+{ |
+ UInt32 sizeReserv; |
+ if (historySize > kMaxHistorySize) |
+ { |
+ MatchFinder_Free(p, alloc); |
+ return 0; |
+ } |
+ sizeReserv = historySize >> 1; |
+ if (historySize > ((UInt32)2 << 30)) |
+ sizeReserv = historySize >> 2; |
+ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19); |
+ |
+ p->keepSizeBefore = historySize + keepAddBufferBefore + 1; |
+ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter; |
+ /* we need one additional byte, since we use MoveBlock after pos++ and before dictionary using */ |
+ if (LzInWindow_Create(p, sizeReserv, alloc)) |
+ { |
+ UInt32 newCyclicBufferSize = historySize + 1; |
+ UInt32 hs; |
+ p->matchMaxLen = matchMaxLen; |
+ { |
+ p->fixedHashSize = 0; |
+ if (p->numHashBytes == 2) |
+ hs = (1 << 16) - 1; |
+ else |
+ { |
+ hs = historySize - 1; |
+ hs |= (hs >> 1); |
+ hs |= (hs >> 2); |
+ hs |= (hs >> 4); |
+ hs |= (hs >> 8); |
+ hs >>= 1; |
+ hs |= 0xFFFF; /* don't change it! It's required for Deflate */ |
+ if (hs > (1 << 24)) |
+ { |
+ if (p->numHashBytes == 3) |
+ hs = (1 << 24) - 1; |
+ else |
+ hs >>= 1; |
+ } |
+ } |
+ p->hashMask = hs; |
+ hs++; |
+ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size; |
+ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size; |
+ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size; |
+ hs += p->fixedHashSize; |
+ } |
+ |
+ { |
+ UInt32 prevSize = p->hashSizeSum + p->numSons; |
+ UInt32 newSize; |
+ p->historySize = historySize; |
+ p->hashSizeSum = hs; |
+ p->cyclicBufferSize = newCyclicBufferSize; |
+ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize); |
+ newSize = p->hashSizeSum + p->numSons; |
+ if (p->hash != 0 && prevSize == newSize) |
+ return 1; |
+ MatchFinder_FreeThisClassMemory(p, alloc); |
+ p->hash = AllocRefs(newSize, alloc); |
+ if (p->hash != 0) |
+ { |
+ p->son = p->hash + p->hashSizeSum; |
+ return 1; |
+ } |
+ } |
+ } |
+ MatchFinder_Free(p, alloc); |
+ return 0; |
+} |
+ |
+static void MatchFinder_SetLimits(CMatchFinder *p) |
+{ |
+ UInt32 limit = kMaxValForNormalize - p->pos; |
+ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos; |
+ if (limit2 < limit) |
+ limit = limit2; |
+ limit2 = p->streamPos - p->pos; |
+ if (limit2 <= p->keepSizeAfter) |
+ { |
+ if (limit2 > 0) |
+ limit2 = 1; |
+ } |
+ else |
+ limit2 -= p->keepSizeAfter; |
+ if (limit2 < limit) |
+ limit = limit2; |
+ { |
+ UInt32 lenLimit = p->streamPos - p->pos; |
+ if (lenLimit > p->matchMaxLen) |
+ lenLimit = p->matchMaxLen; |
+ p->lenLimit = lenLimit; |
+ } |
+ p->posLimit = p->pos + limit; |
+} |
+ |
+void MatchFinder_Init(CMatchFinder *p) |
+{ |
+ UInt32 i; |
+ for (i = 0; i < p->hashSizeSum; i++) |
+ p->hash[i] = kEmptyHashValue; |
+ p->cyclicBufferPos = 0; |
+ p->buffer = p->bufferBase; |
+ p->pos = p->streamPos = p->cyclicBufferSize; |
+ p->result = SZ_OK; |
+ p->streamEndWasReached = 0; |
+ MatchFinder_ReadBlock(p); |
+ MatchFinder_SetLimits(p); |
+} |
+ |
+static UInt32 MatchFinder_GetSubValue(CMatchFinder *p) |
+{ |
+ return (p->pos - p->historySize - 1) & kNormalizeMask; |
+} |
+ |
+void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems) |
+{ |
+ UInt32 i; |
+ for (i = 0; i < numItems; i++) |
+ { |
+ UInt32 value = items[i]; |
+ if (value <= subValue) |
+ value = kEmptyHashValue; |
+ else |
+ value -= subValue; |
+ items[i] = value; |
+ } |
+} |
+ |
+static void MatchFinder_Normalize(CMatchFinder *p) |
+{ |
+ UInt32 subValue = MatchFinder_GetSubValue(p); |
+ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons); |
+ MatchFinder_ReduceOffsets(p, subValue); |
+} |
+ |
+static void MatchFinder_CheckLimits(CMatchFinder *p) |
+{ |
+ if (p->pos == kMaxValForNormalize) |
+ MatchFinder_Normalize(p); |
+ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos) |
+ MatchFinder_CheckAndMoveAndRead(p); |
+ if (p->cyclicBufferPos == p->cyclicBufferSize) |
+ p->cyclicBufferPos = 0; |
+ MatchFinder_SetLimits(p); |
+} |
+ |
+static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, |
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, |
+ UInt32 *distances, UInt32 maxLen) |
+{ |
+ son[_cyclicBufferPos] = curMatch; |
+ for (;;) |
+ { |
+ UInt32 delta = pos - curMatch; |
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize) |
+ return distances; |
+ { |
+ const Byte *pb = cur - delta; |
+ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)]; |
+ if (pb[maxLen] == cur[maxLen] && *pb == *cur) |
+ { |
+ UInt32 len = 0; |
+ while (++len != lenLimit) |
+ if (pb[len] != cur[len]) |
+ break; |
+ if (maxLen < len) |
+ { |
+ *distances++ = maxLen = len; |
+ *distances++ = delta - 1; |
+ if (len == lenLimit) |
+ return distances; |
+ } |
+ } |
+ } |
+ } |
+} |
+ |
+UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, |
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, |
+ UInt32 *distances, UInt32 maxLen) |
+{ |
+ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1; |
+ CLzRef *ptr1 = son + (_cyclicBufferPos << 1); |
+ UInt32 len0 = 0, len1 = 0; |
+ for (;;) |
+ { |
+ UInt32 delta = pos - curMatch; |
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize) |
+ { |
+ *ptr0 = *ptr1 = kEmptyHashValue; |
+ return distances; |
+ } |
+ { |
+ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1); |
+ const Byte *pb = cur - delta; |
+ UInt32 len = (len0 < len1 ? len0 : len1); |
+ if (pb[len] == cur[len]) |
+ { |
+ if (++len != lenLimit && pb[len] == cur[len]) |
+ while (++len != lenLimit) |
+ if (pb[len] != cur[len]) |
+ break; |
+ if (maxLen < len) |
+ { |
+ *distances++ = maxLen = len; |
+ *distances++ = delta - 1; |
+ if (len == lenLimit) |
+ { |
+ *ptr1 = pair[0]; |
+ *ptr0 = pair[1]; |
+ return distances; |
+ } |
+ } |
+ } |
+ if (pb[len] < cur[len]) |
+ { |
+ *ptr1 = curMatch; |
+ ptr1 = pair + 1; |
+ curMatch = *ptr1; |
+ len1 = len; |
+ } |
+ else |
+ { |
+ *ptr0 = curMatch; |
+ ptr0 = pair; |
+ curMatch = *ptr0; |
+ len0 = len; |
+ } |
+ } |
+ } |
+} |
+ |
+static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, |
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue) |
+{ |
+ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1; |
+ CLzRef *ptr1 = son + (_cyclicBufferPos << 1); |
+ UInt32 len0 = 0, len1 = 0; |
+ for (;;) |
+ { |
+ UInt32 delta = pos - curMatch; |
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize) |
+ { |
+ *ptr0 = *ptr1 = kEmptyHashValue; |
+ return; |
+ } |
+ { |
+ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1); |
+ const Byte *pb = cur - delta; |
+ UInt32 len = (len0 < len1 ? len0 : len1); |
+ if (pb[len] == cur[len]) |
+ { |
+ while (++len != lenLimit) |
+ if (pb[len] != cur[len]) |
+ break; |
+ { |
+ if (len == lenLimit) |
+ { |
+ *ptr1 = pair[0]; |
+ *ptr0 = pair[1]; |
+ return; |
+ } |
+ } |
+ } |
+ if (pb[len] < cur[len]) |
+ { |
+ *ptr1 = curMatch; |
+ ptr1 = pair + 1; |
+ curMatch = *ptr1; |
+ len1 = len; |
+ } |
+ else |
+ { |
+ *ptr0 = curMatch; |
+ ptr0 = pair; |
+ curMatch = *ptr0; |
+ len0 = len; |
+ } |
+ } |
+ } |
+} |
+ |
+#define MOVE_POS \ |
+ ++p->cyclicBufferPos; \ |
+ p->buffer++; \ |
+ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p); |
+ |
+#define MOVE_POS_RET MOVE_POS return offset; |
+ |
+static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; } |
+ |
+#define GET_MATCHES_HEADER2(minLen, ret_op) \ |
+ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \ |
+ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \ |
+ cur = p->buffer; |
+ |
+#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0) |
+#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue) |
+ |
+#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue |
+ |
+#define GET_MATCHES_FOOTER(offset, maxLen) \ |
+ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \ |
+ distances + offset, maxLen) - distances); MOVE_POS_RET; |
+ |
+#define SKIP_FOOTER \ |
+ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS; |
+ |
+static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) |
+{ |
+ UInt32 offset; |
+ GET_MATCHES_HEADER(2) |
+ HASH2_CALC; |
+ curMatch = p->hash[hashValue]; |
+ p->hash[hashValue] = p->pos; |
+ offset = 0; |
+ GET_MATCHES_FOOTER(offset, 1) |
+} |
+ |
+UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) |
+{ |
+ UInt32 offset; |
+ GET_MATCHES_HEADER(3) |
+ HASH_ZIP_CALC; |
+ curMatch = p->hash[hashValue]; |
+ p->hash[hashValue] = p->pos; |
+ offset = 0; |
+ GET_MATCHES_FOOTER(offset, 2) |
+} |
+ |
+static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) |
+{ |
+ UInt32 hash2Value, delta2, maxLen, offset; |
+ GET_MATCHES_HEADER(3) |
+ |
+ HASH3_CALC; |
+ |
+ delta2 = p->pos - p->hash[hash2Value]; |
+ curMatch = p->hash[kFix3HashSize + hashValue]; |
+ |
+ p->hash[hash2Value] = |
+ p->hash[kFix3HashSize + hashValue] = p->pos; |
+ |
+ |
+ maxLen = 2; |
+ offset = 0; |
+ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) |
+ { |
+ for (; maxLen != lenLimit; maxLen++) |
+ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) |
+ break; |
+ distances[0] = maxLen; |
+ distances[1] = delta2 - 1; |
+ offset = 2; |
+ if (maxLen == lenLimit) |
+ { |
+ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); |
+ MOVE_POS_RET; |
+ } |
+ } |
+ GET_MATCHES_FOOTER(offset, maxLen) |
+} |
+ |
+static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) |
+{ |
+ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset; |
+ GET_MATCHES_HEADER(4) |
+ |
+ HASH4_CALC; |
+ |
+ delta2 = p->pos - p->hash[ hash2Value]; |
+ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value]; |
+ curMatch = p->hash[kFix4HashSize + hashValue]; |
+ |
+ p->hash[ hash2Value] = |
+ p->hash[kFix3HashSize + hash3Value] = |
+ p->hash[kFix4HashSize + hashValue] = p->pos; |
+ |
+ maxLen = 1; |
+ offset = 0; |
+ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) |
+ { |
+ distances[0] = maxLen = 2; |
+ distances[1] = delta2 - 1; |
+ offset = 2; |
+ } |
+ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur) |
+ { |
+ maxLen = 3; |
+ distances[offset + 1] = delta3 - 1; |
+ offset += 2; |
+ delta2 = delta3; |
+ } |
+ if (offset != 0) |
+ { |
+ for (; maxLen != lenLimit; maxLen++) |
+ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) |
+ break; |
+ distances[offset - 2] = maxLen; |
+ if (maxLen == lenLimit) |
+ { |
+ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); |
+ MOVE_POS_RET; |
+ } |
+ } |
+ if (maxLen < 3) |
+ maxLen = 3; |
+ GET_MATCHES_FOOTER(offset, maxLen) |
+} |
+ |
+static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) |
+{ |
+ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset; |
+ GET_MATCHES_HEADER(4) |
+ |
+ HASH4_CALC; |
+ |
+ delta2 = p->pos - p->hash[ hash2Value]; |
+ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value]; |
+ curMatch = p->hash[kFix4HashSize + hashValue]; |
+ |
+ p->hash[ hash2Value] = |
+ p->hash[kFix3HashSize + hash3Value] = |
+ p->hash[kFix4HashSize + hashValue] = p->pos; |
+ |
+ maxLen = 1; |
+ offset = 0; |
+ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) |
+ { |
+ distances[0] = maxLen = 2; |
+ distances[1] = delta2 - 1; |
+ offset = 2; |
+ } |
+ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur) |
+ { |
+ maxLen = 3; |
+ distances[offset + 1] = delta3 - 1; |
+ offset += 2; |
+ delta2 = delta3; |
+ } |
+ if (offset != 0) |
+ { |
+ for (; maxLen != lenLimit; maxLen++) |
+ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) |
+ break; |
+ distances[offset - 2] = maxLen; |
+ if (maxLen == lenLimit) |
+ { |
+ p->son[p->cyclicBufferPos] = curMatch; |
+ MOVE_POS_RET; |
+ } |
+ } |
+ if (maxLen < 3) |
+ maxLen = 3; |
+ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), |
+ distances + offset, maxLen) - (distances)); |
+ MOVE_POS_RET |
+} |
+ |
+UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) |
+{ |
+ UInt32 offset; |
+ GET_MATCHES_HEADER(3) |
+ HASH_ZIP_CALC; |
+ curMatch = p->hash[hashValue]; |
+ p->hash[hashValue] = p->pos; |
+ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), |
+ distances, 2) - (distances)); |
+ MOVE_POS_RET |
+} |
+ |
+static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num) |
+{ |
+ do |
+ { |
+ SKIP_HEADER(2) |
+ HASH2_CALC; |
+ curMatch = p->hash[hashValue]; |
+ p->hash[hashValue] = p->pos; |
+ SKIP_FOOTER |
+ } |
+ while (--num != 0); |
+} |
+ |
+void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) |
+{ |
+ do |
+ { |
+ SKIP_HEADER(3) |
+ HASH_ZIP_CALC; |
+ curMatch = p->hash[hashValue]; |
+ p->hash[hashValue] = p->pos; |
+ SKIP_FOOTER |
+ } |
+ while (--num != 0); |
+} |
+ |
+static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num) |
+{ |
+ do |
+ { |
+ UInt32 hash2Value; |
+ SKIP_HEADER(3) |
+ HASH3_CALC; |
+ curMatch = p->hash[kFix3HashSize + hashValue]; |
+ p->hash[hash2Value] = |
+ p->hash[kFix3HashSize + hashValue] = p->pos; |
+ SKIP_FOOTER |
+ } |
+ while (--num != 0); |
+} |
+ |
+static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) |
+{ |
+ do |
+ { |
+ UInt32 hash2Value, hash3Value; |
+ SKIP_HEADER(4) |
+ HASH4_CALC; |
+ curMatch = p->hash[kFix4HashSize + hashValue]; |
+ p->hash[ hash2Value] = |
+ p->hash[kFix3HashSize + hash3Value] = p->pos; |
+ p->hash[kFix4HashSize + hashValue] = p->pos; |
+ SKIP_FOOTER |
+ } |
+ while (--num != 0); |
+} |
+ |
+static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) |
+{ |
+ do |
+ { |
+ UInt32 hash2Value, hash3Value; |
+ SKIP_HEADER(4) |
+ HASH4_CALC; |
+ curMatch = p->hash[kFix4HashSize + hashValue]; |
+ p->hash[ hash2Value] = |
+ p->hash[kFix3HashSize + hash3Value] = |
+ p->hash[kFix4HashSize + hashValue] = p->pos; |
+ p->son[p->cyclicBufferPos] = curMatch; |
+ MOVE_POS |
+ } |
+ while (--num != 0); |
+} |
+ |
+void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) |
+{ |
+ do |
+ { |
+ SKIP_HEADER(3) |
+ HASH_ZIP_CALC; |
+ curMatch = p->hash[hashValue]; |
+ p->hash[hashValue] = p->pos; |
+ p->son[p->cyclicBufferPos] = curMatch; |
+ MOVE_POS |
+ } |
+ while (--num != 0); |
+} |
+ |
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable) |
+{ |
+ vTable->Init = (Mf_Init_Func)MatchFinder_Init; |
+ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte; |
+ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes; |
+ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos; |
+ if (!p->btMode) |
+ { |
+ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches; |
+ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip; |
+ } |
+ else if (p->numHashBytes == 2) |
+ { |
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches; |
+ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip; |
+ } |
+ else if (p->numHashBytes == 3) |
+ { |
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches; |
+ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip; |
+ } |
+ else |
+ { |
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches; |
+ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip; |
+ } |
+} |
--- /dev/null |
+++ b/lib/lzma/LzmaDec.c |
@@ -0,0 +1,999 @@ |
+/* LzmaDec.c -- LZMA Decoder |
+2009-09-20 : Igor Pavlov : Public domain */ |
+ |
+#include "LzmaDec.h" |
+ |
+#include <string.h> |
+ |
+#define kNumTopBits 24 |
+#define kTopValue ((UInt32)1 << kNumTopBits) |
+ |
+#define kNumBitModelTotalBits 11 |
+#define kBitModelTotal (1 << kNumBitModelTotalBits) |
+#define kNumMoveBits 5 |
+ |
+#define RC_INIT_SIZE 5 |
+ |
+#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); } |
+ |
+#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound) |
+#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits)); |
+#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); |
+#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \ |
+ { UPDATE_0(p); i = (i + i); A0; } else \ |
+ { UPDATE_1(p); i = (i + i) + 1; A1; } |
+#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;) |
+ |
+#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); } |
+#define TREE_DECODE(probs, limit, i) \ |
+ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; } |
+ |
+/* #define _LZMA_SIZE_OPT */ |
+ |
+#ifdef _LZMA_SIZE_OPT |
+#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i) |
+#else |
+#define TREE_6_DECODE(probs, i) \ |
+ { i = 1; \ |
+ TREE_GET_BIT(probs, i); \ |
+ TREE_GET_BIT(probs, i); \ |
+ TREE_GET_BIT(probs, i); \ |
+ TREE_GET_BIT(probs, i); \ |
+ TREE_GET_BIT(probs, i); \ |
+ TREE_GET_BIT(probs, i); \ |
+ i -= 0x40; } |
+#endif |
+ |
+#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); } |
+ |
+#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound) |
+#define UPDATE_0_CHECK range = bound; |
+#define UPDATE_1_CHECK range -= bound; code -= bound; |
+#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \ |
+ { UPDATE_0_CHECK; i = (i + i); A0; } else \ |
+ { UPDATE_1_CHECK; i = (i + i) + 1; A1; } |
+#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;) |
+#define TREE_DECODE_CHECK(probs, limit, i) \ |
+ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; } |
+ |
+ |
+#define kNumPosBitsMax 4 |
+#define kNumPosStatesMax (1 << kNumPosBitsMax) |
+ |
+#define kLenNumLowBits 3 |
+#define kLenNumLowSymbols (1 << kLenNumLowBits) |
+#define kLenNumMidBits 3 |
+#define kLenNumMidSymbols (1 << kLenNumMidBits) |
+#define kLenNumHighBits 8 |
+#define kLenNumHighSymbols (1 << kLenNumHighBits) |
+ |
+#define LenChoice 0 |
+#define LenChoice2 (LenChoice + 1) |
+#define LenLow (LenChoice2 + 1) |
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits)) |
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits)) |
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols) |
+ |
+ |
+#define kNumStates 12 |
+#define kNumLitStates 7 |
+ |
+#define kStartPosModelIndex 4 |
+#define kEndPosModelIndex 14 |
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) |
+ |
+#define kNumPosSlotBits 6 |
+#define kNumLenToPosStates 4 |
+ |
+#define kNumAlignBits 4 |
+#define kAlignTableSize (1 << kNumAlignBits) |
+ |
+#define kMatchMinLen 2 |
+#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols) |
+ |
+#define IsMatch 0 |
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax)) |
+#define IsRepG0 (IsRep + kNumStates) |
+#define IsRepG1 (IsRepG0 + kNumStates) |
+#define IsRepG2 (IsRepG1 + kNumStates) |
+#define IsRep0Long (IsRepG2 + kNumStates) |
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax)) |
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits)) |
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex) |
+#define LenCoder (Align + kAlignTableSize) |
+#define RepLenCoder (LenCoder + kNumLenProbs) |
+#define Literal (RepLenCoder + kNumLenProbs) |
+ |
+#define LZMA_BASE_SIZE 1846 |
+#define LZMA_LIT_SIZE 768 |
+ |
+#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp))) |
+ |
+#if Literal != LZMA_BASE_SIZE |
+StopCompilingDueBUG |
+#endif |
+ |
+#define LZMA_DIC_MIN (1 << 12) |
+ |
+/* First LZMA-symbol is always decoded. |
+And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization |
+Out: |
+ Result: |
+ SZ_OK - OK |
+ SZ_ERROR_DATA - Error |
+ p->remainLen: |
+ < kMatchSpecLenStart : normal remain |
+ = kMatchSpecLenStart : finished |
+ = kMatchSpecLenStart + 1 : Flush marker |
+ = kMatchSpecLenStart + 2 : State Init Marker |
+*/ |
+ |
+static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit) |
+{ |
+ CLzmaProb *probs = p->probs; |
+ |
+ unsigned state = p->state; |
+ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3]; |
+ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1; |
+ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1; |
+ unsigned lc = p->prop.lc; |
+ |
+ Byte *dic = p->dic; |
+ SizeT dicBufSize = p->dicBufSize; |
+ SizeT dicPos = p->dicPos; |
+ |
+ UInt32 processedPos = p->processedPos; |
+ UInt32 checkDicSize = p->checkDicSize; |
+ unsigned len = 0; |
+ |
+ const Byte *buf = p->buf; |
+ UInt32 range = p->range; |
+ UInt32 code = p->code; |
+ |
+ do |
+ { |
+ CLzmaProb *prob; |
+ UInt32 bound; |
+ unsigned ttt; |
+ unsigned posState = processedPos & pbMask; |
+ |
+ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState; |
+ IF_BIT_0(prob) |
+ { |
+ unsigned symbol; |
+ UPDATE_0(prob); |
+ prob = probs + Literal; |
+ if (checkDicSize != 0 || processedPos != 0) |
+ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) + |
+ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc)))); |
+ |
+ if (state < kNumLitStates) |
+ { |
+ state -= (state < 4) ? state : 3; |
+ symbol = 1; |
+ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100); |
+ } |
+ else |
+ { |
+ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)]; |
+ unsigned offs = 0x100; |
+ state -= (state < 10) ? 3 : 6; |
+ symbol = 1; |
+ do |
+ { |
+ unsigned bit; |
+ CLzmaProb *probLit; |
+ matchByte <<= 1; |
+ bit = (matchByte & offs); |
+ probLit = prob + offs + bit + symbol; |
+ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit) |
+ } |
+ while (symbol < 0x100); |
+ } |
+ dic[dicPos++] = (Byte)symbol; |
+ processedPos++; |
+ continue; |
+ } |
+ else |
+ { |
+ UPDATE_1(prob); |
+ prob = probs + IsRep + state; |
+ IF_BIT_0(prob) |
+ { |
+ UPDATE_0(prob); |
+ state += kNumStates; |
+ prob = probs + LenCoder; |
+ } |
+ else |
+ { |
+ UPDATE_1(prob); |
+ if (checkDicSize == 0 && processedPos == 0) |
+ return SZ_ERROR_DATA; |
+ prob = probs + IsRepG0 + state; |
+ IF_BIT_0(prob) |
+ { |
+ UPDATE_0(prob); |
+ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState; |
+ IF_BIT_0(prob) |
+ { |
+ UPDATE_0(prob); |
+ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)]; |
+ dicPos++; |
+ processedPos++; |
+ state = state < kNumLitStates ? 9 : 11; |
+ continue; |
+ } |
+ UPDATE_1(prob); |
+ } |
+ else |
+ { |
+ UInt32 distance; |
+ UPDATE_1(prob); |
+ prob = probs + IsRepG1 + state; |
+ IF_BIT_0(prob) |
+ { |
+ UPDATE_0(prob); |
+ distance = rep1; |
+ } |
+ else |
+ { |
+ UPDATE_1(prob); |
+ prob = probs + IsRepG2 + state; |
+ IF_BIT_0(prob) |
+ { |
+ UPDATE_0(prob); |
+ distance = rep2; |
+ } |
+ else |
+ { |
+ UPDATE_1(prob); |
+ distance = rep3; |
+ rep3 = rep2; |
+ } |
+ rep2 = rep1; |
+ } |
+ rep1 = rep0; |
+ rep0 = distance; |
+ } |
+ state = state < kNumLitStates ? 8 : 11; |
+ prob = probs + RepLenCoder; |
+ } |
+ { |
+ unsigned limit, offset; |
+ CLzmaProb *probLen = prob + LenChoice; |
+ IF_BIT_0(probLen) |
+ { |
+ UPDATE_0(probLen); |
+ probLen = prob + LenLow + (posState << kLenNumLowBits); |
+ offset = 0; |
+ limit = (1 << kLenNumLowBits); |
+ } |
+ else |
+ { |
+ UPDATE_1(probLen); |
+ probLen = prob + LenChoice2; |
+ IF_BIT_0(probLen) |
+ { |
+ UPDATE_0(probLen); |
+ probLen = prob + LenMid + (posState << kLenNumMidBits); |
+ offset = kLenNumLowSymbols; |
+ limit = (1 << kLenNumMidBits); |
+ } |
+ else |
+ { |
+ UPDATE_1(probLen); |
+ probLen = prob + LenHigh; |
+ offset = kLenNumLowSymbols + kLenNumMidSymbols; |
+ limit = (1 << kLenNumHighBits); |
+ } |
+ } |
+ TREE_DECODE(probLen, limit, len); |
+ len += offset; |
+ } |
+ |
+ if (state >= kNumStates) |
+ { |
+ UInt32 distance; |
+ prob = probs + PosSlot + |
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits); |
+ TREE_6_DECODE(prob, distance); |
+ if (distance >= kStartPosModelIndex) |
+ { |
+ unsigned posSlot = (unsigned)distance; |
+ int numDirectBits = (int)(((distance >> 1) - 1)); |
+ distance = (2 | (distance & 1)); |
+ if (posSlot < kEndPosModelIndex) |
+ { |
+ distance <<= numDirectBits; |
+ prob = probs + SpecPos + distance - posSlot - 1; |
+ { |
+ UInt32 mask = 1; |
+ unsigned i = 1; |
+ do |
+ { |
+ GET_BIT2(prob + i, i, ; , distance |= mask); |
+ mask <<= 1; |
+ } |
+ while (--numDirectBits != 0); |
+ } |
+ } |
+ else |
+ { |
+ numDirectBits -= kNumAlignBits; |
+ do |
+ { |
+ NORMALIZE |
+ range >>= 1; |
+ |
+ { |
+ UInt32 t; |
+ code -= range; |
+ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */ |
+ distance = (distance << 1) + (t + 1); |
+ code += range & t; |
+ } |
+ /* |
+ distance <<= 1; |
+ if (code >= range) |
+ { |
+ code -= range; |
+ distance |= 1; |
+ } |
+ */ |
+ } |
+ while (--numDirectBits != 0); |
+ prob = probs + Align; |
+ distance <<= kNumAlignBits; |
+ { |
+ unsigned i = 1; |
+ GET_BIT2(prob + i, i, ; , distance |= 1); |
+ GET_BIT2(prob + i, i, ; , distance |= 2); |
+ GET_BIT2(prob + i, i, ; , distance |= 4); |
+ GET_BIT2(prob + i, i, ; , distance |= 8); |
+ } |
+ if (distance == (UInt32)0xFFFFFFFF) |
+ { |
+ len += kMatchSpecLenStart; |
+ state -= kNumStates; |
+ break; |
+ } |
+ } |
+ } |
+ rep3 = rep2; |
+ rep2 = rep1; |
+ rep1 = rep0; |
+ rep0 = distance + 1; |
+ if (checkDicSize == 0) |
+ { |
+ if (distance >= processedPos) |
+ return SZ_ERROR_DATA; |
+ } |
+ else if (distance >= checkDicSize) |
+ return SZ_ERROR_DATA; |
+ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3; |
+ } |
+ |
+ len += kMatchMinLen; |
+ |
+ if (limit == dicPos) |
+ return SZ_ERROR_DATA; |
+ { |
+ SizeT rem = limit - dicPos; |
+ unsigned curLen = ((rem < len) ? (unsigned)rem : len); |
+ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0); |
+ |
+ processedPos += curLen; |
+ |
+ len -= curLen; |
+ if (pos + curLen <= dicBufSize) |
+ { |
+ Byte *dest = dic + dicPos; |
+ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos; |
+ const Byte *lim = dest + curLen; |
+ dicPos += curLen; |
+ do |
+ *(dest) = (Byte)*(dest + src); |
+ while (++dest != lim); |
+ } |
+ else |
+ { |
+ do |
+ { |
+ dic[dicPos++] = dic[pos]; |
+ if (++pos == dicBufSize) |
+ pos = 0; |
+ } |
+ while (--curLen != 0); |
+ } |
+ } |
+ } |
+ } |
+ while (dicPos < limit && buf < bufLimit); |
+ NORMALIZE; |
+ p->buf = buf; |
+ p->range = range; |
+ p->code = code; |
+ p->remainLen = len; |
+ p->dicPos = dicPos; |
+ p->processedPos = processedPos; |
+ p->reps[0] = rep0; |
+ p->reps[1] = rep1; |
+ p->reps[2] = rep2; |
+ p->reps[3] = rep3; |
+ p->state = state; |
+ |
+ return SZ_OK; |
+} |
+ |
+static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit) |
+{ |
+ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart) |
+ { |
+ Byte *dic = p->dic; |
+ SizeT dicPos = p->dicPos; |
+ SizeT dicBufSize = p->dicBufSize; |
+ unsigned len = p->remainLen; |
+ UInt32 rep0 = p->reps[0]; |
+ if (limit - dicPos < len) |
+ len = (unsigned)(limit - dicPos); |
+ |
+ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len) |
+ p->checkDicSize = p->prop.dicSize; |
+ |
+ p->processedPos += len; |
+ p->remainLen -= len; |
+ while (len-- != 0) |
+ { |
+ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)]; |
+ dicPos++; |
+ } |
+ p->dicPos = dicPos; |
+ } |
+} |
+ |
+static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit) |
+{ |
+ do |
+ { |
+ SizeT limit2 = limit; |
+ if (p->checkDicSize == 0) |
+ { |
+ UInt32 rem = p->prop.dicSize - p->processedPos; |
+ if (limit - p->dicPos > rem) |
+ limit2 = p->dicPos + rem; |
+ } |
+ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit)); |
+ if (p->processedPos >= p->prop.dicSize) |
+ p->checkDicSize = p->prop.dicSize; |
+ LzmaDec_WriteRem(p, limit); |
+ } |
+ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart); |
+ |
+ if (p->remainLen > kMatchSpecLenStart) |
+ { |
+ p->remainLen = kMatchSpecLenStart; |
+ } |
+ return 0; |
+} |
+ |
+typedef enum |
+{ |
+ DUMMY_ERROR, /* unexpected end of input stream */ |
+ DUMMY_LIT, |
+ DUMMY_MATCH, |
+ DUMMY_REP |
+} ELzmaDummy; |
+ |
+static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize) |
+{ |
+ UInt32 range = p->range; |
+ UInt32 code = p->code; |
+ const Byte *bufLimit = buf + inSize; |
+ CLzmaProb *probs = p->probs; |
+ unsigned state = p->state; |
+ ELzmaDummy res; |
+ |
+ { |
+ CLzmaProb *prob; |
+ UInt32 bound; |
+ unsigned ttt; |
+ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1); |
+ |
+ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState; |
+ IF_BIT_0_CHECK(prob) |
+ { |
+ UPDATE_0_CHECK |
+ |
+ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */ |
+ |
+ prob = probs + Literal; |
+ if (p->checkDicSize != 0 || p->processedPos != 0) |
+ prob += (LZMA_LIT_SIZE * |
+ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) + |
+ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc)))); |
+ |
+ if (state < kNumLitStates) |
+ { |
+ unsigned symbol = 1; |
+ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100); |
+ } |
+ else |
+ { |
+ unsigned matchByte = p->dic[p->dicPos - p->reps[0] + |
+ ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)]; |
+ unsigned offs = 0x100; |
+ unsigned symbol = 1; |
+ do |
+ { |
+ unsigned bit; |
+ CLzmaProb *probLit; |
+ matchByte <<= 1; |
+ bit = (matchByte & offs); |
+ probLit = prob + offs + bit + symbol; |
+ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit) |
+ } |
+ while (symbol < 0x100); |
+ } |
+ res = DUMMY_LIT; |
+ } |
+ else |
+ { |
+ unsigned len; |
+ UPDATE_1_CHECK; |
+ |
+ prob = probs + IsRep + state; |
+ IF_BIT_0_CHECK(prob) |
+ { |
+ UPDATE_0_CHECK; |
+ state = 0; |
+ prob = probs + LenCoder; |
+ res = DUMMY_MATCH; |
+ } |
+ else |
+ { |
+ UPDATE_1_CHECK; |
+ res = DUMMY_REP; |
+ prob = probs + IsRepG0 + state; |
+ IF_BIT_0_CHECK(prob) |
+ { |
+ UPDATE_0_CHECK; |
+ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState; |
+ IF_BIT_0_CHECK(prob) |
+ { |
+ UPDATE_0_CHECK; |
+ NORMALIZE_CHECK; |
+ return DUMMY_REP; |
+ } |
+ else |
+ { |
+ UPDATE_1_CHECK; |
+ } |
+ } |
+ else |
+ { |
+ UPDATE_1_CHECK; |
+ prob = probs + IsRepG1 + state; |
+ IF_BIT_0_CHECK(prob) |
+ { |
+ UPDATE_0_CHECK; |
+ } |
+ else |
+ { |
+ UPDATE_1_CHECK; |
+ prob = probs + IsRepG2 + state; |
+ IF_BIT_0_CHECK(prob) |
+ { |
+ UPDATE_0_CHECK; |
+ } |
+ else |
+ { |
+ UPDATE_1_CHECK; |
+ } |
+ } |
+ } |
+ state = kNumStates; |
+ prob = probs + RepLenCoder; |
+ } |
+ { |
+ unsigned limit, offset; |
+ CLzmaProb *probLen = prob + LenChoice; |
+ IF_BIT_0_CHECK(probLen) |
+ { |
+ UPDATE_0_CHECK; |
+ probLen = prob + LenLow + (posState << kLenNumLowBits); |
+ offset = 0; |
+ limit = 1 << kLenNumLowBits; |
+ } |
+ else |
+ { |
+ UPDATE_1_CHECK; |
+ probLen = prob + LenChoice2; |
+ IF_BIT_0_CHECK(probLen) |
+ { |
+ UPDATE_0_CHECK; |
+ probLen = prob + LenMid + (posState << kLenNumMidBits); |
+ offset = kLenNumLowSymbols; |
+ limit = 1 << kLenNumMidBits; |
+ } |
+ else |
+ { |
+ UPDATE_1_CHECK; |
+ probLen = prob + LenHigh; |
+ offset = kLenNumLowSymbols + kLenNumMidSymbols; |
+ limit = 1 << kLenNumHighBits; |
+ } |
+ } |
+ TREE_DECODE_CHECK(probLen, limit, len); |
+ len += offset; |
+ } |
+ |
+ if (state < 4) |
+ { |
+ unsigned posSlot; |
+ prob = probs + PosSlot + |
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << |
+ kNumPosSlotBits); |
+ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot); |
+ if (posSlot >= kStartPosModelIndex) |
+ { |
+ int numDirectBits = ((posSlot >> 1) - 1); |
+ |
+ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */ |
+ |
+ if (posSlot < kEndPosModelIndex) |
+ { |
+ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1; |
+ } |
+ else |
+ { |
+ numDirectBits -= kNumAlignBits; |
+ do |
+ { |
+ NORMALIZE_CHECK |
+ range >>= 1; |
+ code -= range & (((code - range) >> 31) - 1); |
+ /* if (code >= range) code -= range; */ |
+ } |
+ while (--numDirectBits != 0); |
+ prob = probs + Align; |
+ numDirectBits = kNumAlignBits; |
+ } |
+ { |
+ unsigned i = 1; |
+ do |
+ { |
+ GET_BIT_CHECK(prob + i, i); |
+ } |
+ while (--numDirectBits != 0); |
+ } |
+ } |
+ } |
+ } |
+ } |
+ NORMALIZE_CHECK; |
+ return res; |
+} |
+ |
+ |
+static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data) |
+{ |
+ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]); |
+ p->range = 0xFFFFFFFF; |
+ p->needFlush = 0; |
+} |
+ |
+void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState) |
+{ |
+ p->needFlush = 1; |
+ p->remainLen = 0; |
+ p->tempBufSize = 0; |
+ |
+ if (initDic) |
+ { |
+ p->processedPos = 0; |
+ p->checkDicSize = 0; |
+ p->needInitState = 1; |
+ } |
+ if (initState) |
+ p->needInitState = 1; |
+} |
+ |
+void LzmaDec_Init(CLzmaDec *p) |
+{ |
+ p->dicPos = 0; |
+ LzmaDec_InitDicAndState(p, True, True); |
+} |
+ |
+static void LzmaDec_InitStateReal(CLzmaDec *p) |
+{ |
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp)); |
+ UInt32 i; |
+ CLzmaProb *probs = p->probs; |
+ for (i = 0; i < numProbs; i++) |
+ probs[i] = kBitModelTotal >> 1; |
+ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1; |
+ p->state = 0; |
+ p->needInitState = 0; |
+} |
+ |
+SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen, |
+ ELzmaFinishMode finishMode, ELzmaStatus *status) |
+{ |
+ SizeT inSize = *srcLen; |
+ (*srcLen) = 0; |
+ LzmaDec_WriteRem(p, dicLimit); |
+ |
+ *status = LZMA_STATUS_NOT_SPECIFIED; |
+ |
+ while (p->remainLen != kMatchSpecLenStart) |
+ { |
+ int checkEndMarkNow; |
+ |
+ if (p->needFlush != 0) |
+ { |
+ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--) |
+ p->tempBuf[p->tempBufSize++] = *src++; |
+ if (p->tempBufSize < RC_INIT_SIZE) |
+ { |
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT; |
+ return SZ_OK; |
+ } |
+ if (p->tempBuf[0] != 0) |
+ return SZ_ERROR_DATA; |
+ |
+ LzmaDec_InitRc(p, p->tempBuf); |
+ p->tempBufSize = 0; |
+ } |
+ |
+ checkEndMarkNow = 0; |
+ if (p->dicPos >= dicLimit) |
+ { |
+ if (p->remainLen == 0 && p->code == 0) |
+ { |
+ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK; |
+ return SZ_OK; |
+ } |
+ if (finishMode == LZMA_FINISH_ANY) |
+ { |
+ *status = LZMA_STATUS_NOT_FINISHED; |
+ return SZ_OK; |
+ } |
+ if (p->remainLen != 0) |
+ { |
+ *status = LZMA_STATUS_NOT_FINISHED; |
+ return SZ_ERROR_DATA; |
+ } |
+ checkEndMarkNow = 1; |
+ } |
+ |
+ if (p->needInitState) |
+ LzmaDec_InitStateReal(p); |
+ |
+ if (p->tempBufSize == 0) |
+ { |
+ SizeT processed; |
+ const Byte *bufLimit; |
+ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) |
+ { |
+ int dummyRes = LzmaDec_TryDummy(p, src, inSize); |
+ if (dummyRes == DUMMY_ERROR) |
+ { |
+ memcpy(p->tempBuf, src, inSize); |
+ p->tempBufSize = (unsigned)inSize; |
+ (*srcLen) += inSize; |
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT; |
+ return SZ_OK; |
+ } |
+ if (checkEndMarkNow && dummyRes != DUMMY_MATCH) |
+ { |
+ *status = LZMA_STATUS_NOT_FINISHED; |
+ return SZ_ERROR_DATA; |
+ } |
+ bufLimit = src; |
+ } |
+ else |
+ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX; |
+ p->buf = src; |
+ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0) |
+ return SZ_ERROR_DATA; |
+ processed = (SizeT)(p->buf - src); |
+ (*srcLen) += processed; |
+ src += processed; |
+ inSize -= processed; |
+ } |
+ else |
+ { |
+ unsigned rem = p->tempBufSize, lookAhead = 0; |
+ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize) |
+ p->tempBuf[rem++] = src[lookAhead++]; |
+ p->tempBufSize = rem; |
+ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) |
+ { |
+ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem); |
+ if (dummyRes == DUMMY_ERROR) |
+ { |
+ (*srcLen) += lookAhead; |
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT; |
+ return SZ_OK; |
+ } |
+ if (checkEndMarkNow && dummyRes != DUMMY_MATCH) |
+ { |
+ *status = LZMA_STATUS_NOT_FINISHED; |
+ return SZ_ERROR_DATA; |
+ } |
+ } |
+ p->buf = p->tempBuf; |
+ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0) |
+ return SZ_ERROR_DATA; |
+ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf)); |
+ (*srcLen) += lookAhead; |
+ src += lookAhead; |
+ inSize -= lookAhead; |
+ p->tempBufSize = 0; |
+ } |
+ } |
+ if (p->code == 0) |
+ *status = LZMA_STATUS_FINISHED_WITH_MARK; |
+ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA; |
+} |
+ |
+SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) |
+{ |
+ SizeT outSize = *destLen; |
+ SizeT inSize = *srcLen; |
+ *srcLen = *destLen = 0; |
+ for (;;) |
+ { |
+ SizeT inSizeCur = inSize, outSizeCur, dicPos; |
+ ELzmaFinishMode curFinishMode; |
+ SRes res; |
+ if (p->dicPos == p->dicBufSize) |
+ p->dicPos = 0; |
+ dicPos = p->dicPos; |
+ if (outSize > p->dicBufSize - dicPos) |
+ { |
+ outSizeCur = p->dicBufSize; |
+ curFinishMode = LZMA_FINISH_ANY; |
+ } |
+ else |
+ { |
+ outSizeCur = dicPos + outSize; |
+ curFinishMode = finishMode; |
+ } |
+ |
+ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status); |
+ src += inSizeCur; |
+ inSize -= inSizeCur; |
+ *srcLen += inSizeCur; |
+ outSizeCur = p->dicPos - dicPos; |
+ memcpy(dest, p->dic + dicPos, outSizeCur); |
+ dest += outSizeCur; |
+ outSize -= outSizeCur; |
+ *destLen += outSizeCur; |
+ if (res != 0) |
+ return res; |
+ if (outSizeCur == 0 || outSize == 0) |
+ return SZ_OK; |
+ } |
+} |
+ |
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc) |
+{ |
+ alloc->Free(alloc, p->probs); |
+ p->probs = 0; |
+} |
+ |
+static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc) |
+{ |
+ alloc->Free(alloc, p->dic); |
+ p->dic = 0; |
+} |
+ |
+void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc) |
+{ |
+ LzmaDec_FreeProbs(p, alloc); |
+ LzmaDec_FreeDict(p, alloc); |
+} |
+ |
+SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size) |
+{ |
+ UInt32 dicSize; |
+ Byte d; |
+ |
+ if (size < LZMA_PROPS_SIZE) |
+ return SZ_ERROR_UNSUPPORTED; |
+ else |
+ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24); |
+ |
+ if (dicSize < LZMA_DIC_MIN) |
+ dicSize = LZMA_DIC_MIN; |
+ p->dicSize = dicSize; |
+ |
+ d = data[0]; |
+ if (d >= (9 * 5 * 5)) |
+ return SZ_ERROR_UNSUPPORTED; |
+ |
+ p->lc = d % 9; |
+ d /= 9; |
+ p->pb = d / 5; |
+ p->lp = d % 5; |
+ |
+ return SZ_OK; |
+} |
+ |
+static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc) |
+{ |
+ UInt32 numProbs = LzmaProps_GetNumProbs(propNew); |
+ if (p->probs == 0 || numProbs != p->numProbs) |
+ { |
+ LzmaDec_FreeProbs(p, alloc); |
+ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb)); |
+ p->numProbs = numProbs; |
+ if (p->probs == 0) |
+ return SZ_ERROR_MEM; |
+ } |
+ return SZ_OK; |
+} |
+ |
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) |
+{ |
+ CLzmaProps propNew; |
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize)); |
+ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); |
+ p->prop = propNew; |
+ return SZ_OK; |
+} |
+ |
+SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) |
+{ |
+ CLzmaProps propNew; |
+ SizeT dicBufSize; |
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize)); |
+ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); |
+ dicBufSize = propNew.dicSize; |
+ if (p->dic == 0 || dicBufSize != p->dicBufSize) |
+ { |
+ LzmaDec_FreeDict(p, alloc); |
+ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize); |
+ if (p->dic == 0) |
+ { |
+ LzmaDec_FreeProbs(p, alloc); |
+ return SZ_ERROR_MEM; |
+ } |
+ } |
+ p->dicBufSize = dicBufSize; |
+ p->prop = propNew; |
+ return SZ_OK; |
+} |
+ |
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, |
+ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, |
+ ELzmaStatus *status, ISzAlloc *alloc) |
+{ |
+ CLzmaDec p; |
+ SRes res; |
+ SizeT inSize = *srcLen; |
+ SizeT outSize = *destLen; |
+ *srcLen = *destLen = 0; |
+ if (inSize < RC_INIT_SIZE) |
+ return SZ_ERROR_INPUT_EOF; |
+ |
+ LzmaDec_Construct(&p); |
+ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc); |
+ if (res != 0) |
+ return res; |
+ p.dic = dest; |
+ p.dicBufSize = outSize; |
+ |
+ LzmaDec_Init(&p); |
+ |
+ *srcLen = inSize; |
+ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status); |
+ |
+ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT) |
+ res = SZ_ERROR_INPUT_EOF; |
+ |
+ (*destLen) = p.dicPos; |
+ LzmaDec_FreeProbs(&p, alloc); |
+ return res; |
+} |
--- /dev/null |
+++ b/lib/lzma/LzmaEnc.c |
@@ -0,0 +1,2271 @@ |
+/* LzmaEnc.c -- LZMA Encoder |
+2009-11-24 : Igor Pavlov : Public domain */ |
+ |
+#include <string.h> |
+ |
+/* #define SHOW_STAT */ |
+/* #define SHOW_STAT2 */ |
+ |
+#if defined(SHOW_STAT) || defined(SHOW_STAT2) |
+#include <stdio.h> |
+#endif |
+ |
+#include "LzmaEnc.h" |
+ |
+/* disable MT */ |
+#define _7ZIP_ST |
+ |
+#include "LzFind.h" |
+#ifndef _7ZIP_ST |
+#include "LzFindMt.h" |
+#endif |
+ |
+#ifdef SHOW_STAT |
+static int ttt = 0; |
+#endif |
+ |
+#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1) |
+ |
+#define kBlockSize (9 << 10) |
+#define kUnpackBlockSize (1 << 18) |
+#define kMatchArraySize (1 << 21) |
+#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX) |
+ |
+#define kNumMaxDirectBits (31) |
+ |
+#define kNumTopBits 24 |
+#define kTopValue ((UInt32)1 << kNumTopBits) |
+ |
+#define kNumBitModelTotalBits 11 |
+#define kBitModelTotal (1 << kNumBitModelTotalBits) |
+#define kNumMoveBits 5 |
+#define kProbInitValue (kBitModelTotal >> 1) |
+ |
+#define kNumMoveReducingBits 4 |
+#define kNumBitPriceShiftBits 4 |
+#define kBitPrice (1 << kNumBitPriceShiftBits) |
+ |
+void LzmaEncProps_Init(CLzmaEncProps *p) |
+{ |
+ p->level = 5; |
+ p->dictSize = p->mc = 0; |
+ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1; |
+ p->writeEndMark = 0; |
+} |
+ |
+void LzmaEncProps_Normalize(CLzmaEncProps *p) |
+{ |
+ int level = p->level; |
+ if (level < 0) level = 5; |
+ p->level = level; |
+ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26))); |
+ if (p->lc < 0) p->lc = 3; |
+ if (p->lp < 0) p->lp = 0; |
+ if (p->pb < 0) p->pb = 2; |
+ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1); |
+ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64); |
+ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1); |
+ if (p->numHashBytes < 0) p->numHashBytes = 4; |
+ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1); |
+ if (p->numThreads < 0) |
+ p->numThreads = |
+ #ifndef _7ZIP_ST |
+ ((p->btMode && p->algo) ? 2 : 1); |
+ #else |
+ 1; |
+ #endif |
+} |
+ |
+UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2) |
+{ |
+ CLzmaEncProps props = *props2; |
+ LzmaEncProps_Normalize(&props); |
+ return props.dictSize; |
+} |
+ |
+/* #define LZMA_LOG_BSR */ |
+/* Define it for Intel's CPU */ |
+ |
+ |
+#ifdef LZMA_LOG_BSR |
+ |
+#define kDicLogSizeMaxCompress 30 |
+ |
+#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); } |
+ |
+UInt32 GetPosSlot1(UInt32 pos) |
+{ |
+ UInt32 res; |
+ BSR2_RET(pos, res); |
+ return res; |
+} |
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } |
+#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); } |
+ |
+#else |
+ |
+#define kNumLogBits (9 + (int)sizeof(size_t) / 2) |
+#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7) |
+ |
+void LzmaEnc_FastPosInit(Byte *g_FastPos) |
+{ |
+ int c = 2, slotFast; |
+ g_FastPos[0] = 0; |
+ g_FastPos[1] = 1; |
+ |
+ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++) |
+ { |
+ UInt32 k = (1 << ((slotFast >> 1) - 1)); |
+ UInt32 j; |
+ for (j = 0; j < k; j++, c++) |
+ g_FastPos[c] = (Byte)slotFast; |
+ } |
+} |
+ |
+#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \ |
+ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \ |
+ res = p->g_FastPos[pos >> i] + (i * 2); } |
+/* |
+#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \ |
+ p->g_FastPos[pos >> 6] + 12 : \ |
+ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; } |
+*/ |
+ |
+#define GetPosSlot1(pos) p->g_FastPos[pos] |
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } |
+#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); } |
+ |
+#endif |
+ |
+ |
+#define LZMA_NUM_REPS 4 |
+ |
+typedef unsigned CState; |
+ |
+typedef struct |
+{ |
+ UInt32 price; |
+ |
+ CState state; |
+ int prev1IsChar; |
+ int prev2; |
+ |
+ UInt32 posPrev2; |
+ UInt32 backPrev2; |
+ |
+ UInt32 posPrev; |
+ UInt32 backPrev; |
+ UInt32 backs[LZMA_NUM_REPS]; |
+} COptimal; |
+ |
+#define kNumOpts (1 << 12) |
+ |
+#define kNumLenToPosStates 4 |
+#define kNumPosSlotBits 6 |
+#define kDicLogSizeMin 0 |
+#define kDicLogSizeMax 32 |
+#define kDistTableSizeMax (kDicLogSizeMax * 2) |
+ |
+ |
+#define kNumAlignBits 4 |
+#define kAlignTableSize (1 << kNumAlignBits) |
+#define kAlignMask (kAlignTableSize - 1) |
+ |
+#define kStartPosModelIndex 4 |
+#define kEndPosModelIndex 14 |
+#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex) |
+ |
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) |
+ |
+#ifdef _LZMA_PROB32 |
+#define CLzmaProb UInt32 |
+#else |
+#define CLzmaProb UInt16 |
+#endif |
+ |
+#define LZMA_PB_MAX 4 |
+#define LZMA_LC_MAX 8 |
+#define LZMA_LP_MAX 4 |
+ |
+#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX) |
+ |
+ |
+#define kLenNumLowBits 3 |
+#define kLenNumLowSymbols (1 << kLenNumLowBits) |
+#define kLenNumMidBits 3 |
+#define kLenNumMidSymbols (1 << kLenNumMidBits) |
+#define kLenNumHighBits 8 |
+#define kLenNumHighSymbols (1 << kLenNumHighBits) |
+ |
+#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols) |
+ |
+#define LZMA_MATCH_LEN_MIN 2 |
+#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1) |
+ |
+#define kNumStates 12 |
+ |
+typedef struct |
+{ |
+ CLzmaProb choice; |
+ CLzmaProb choice2; |
+ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits]; |
+ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits]; |
+ CLzmaProb high[kLenNumHighSymbols]; |
+} CLenEnc; |
+ |
+typedef struct |
+{ |
+ CLenEnc p; |
+ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal]; |
+ UInt32 tableSize; |
+ UInt32 counters[LZMA_NUM_PB_STATES_MAX]; |
+} CLenPriceEnc; |
+ |
+typedef struct |
+{ |
+ UInt32 range; |
+ Byte cache; |
+ UInt64 low; |
+ UInt64 cacheSize; |
+ Byte *buf; |
+ Byte *bufLim; |
+ Byte *bufBase; |
+ ISeqOutStream *outStream; |
+ UInt64 processed; |
+ SRes res; |
+} CRangeEnc; |
+ |
+typedef struct |
+{ |
+ CLzmaProb *litProbs; |
+ |
+ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; |
+ CLzmaProb isRep[kNumStates]; |
+ CLzmaProb isRepG0[kNumStates]; |
+ CLzmaProb isRepG1[kNumStates]; |
+ CLzmaProb isRepG2[kNumStates]; |
+ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; |
+ |
+ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; |
+ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex]; |
+ CLzmaProb posAlignEncoder[1 << kNumAlignBits]; |
+ |
+ CLenPriceEnc lenEnc; |
+ CLenPriceEnc repLenEnc; |
+ |
+ UInt32 reps[LZMA_NUM_REPS]; |
+ UInt32 state; |
+} CSaveState; |
+ |
+typedef struct |
+{ |
+ IMatchFinder matchFinder; |
+ void *matchFinderObj; |
+ |
+ #ifndef _7ZIP_ST |
+ Bool mtMode; |
+ CMatchFinderMt matchFinderMt; |
+ #endif |
+ |
+ CMatchFinder matchFinderBase; |
+ |
+ #ifndef _7ZIP_ST |
+ Byte pad[128]; |
+ #endif |
+ |
+ UInt32 optimumEndIndex; |
+ UInt32 optimumCurrentIndex; |
+ |
+ UInt32 longestMatchLength; |
+ UInt32 numPairs; |
+ UInt32 numAvail; |
+ COptimal opt[kNumOpts]; |
+ |
+ #ifndef LZMA_LOG_BSR |
+ Byte g_FastPos[1 << kNumLogBits]; |
+ #endif |
+ |
+ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits]; |
+ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1]; |
+ UInt32 numFastBytes; |
+ UInt32 additionalOffset; |
+ UInt32 reps[LZMA_NUM_REPS]; |
+ UInt32 state; |
+ |
+ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax]; |
+ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances]; |
+ UInt32 alignPrices[kAlignTableSize]; |
+ UInt32 alignPriceCount; |
+ |
+ UInt32 distTableSize; |
+ |
+ unsigned lc, lp, pb; |
+ unsigned lpMask, pbMask; |
+ |
+ CLzmaProb *litProbs; |
+ |
+ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; |
+ CLzmaProb isRep[kNumStates]; |
+ CLzmaProb isRepG0[kNumStates]; |
+ CLzmaProb isRepG1[kNumStates]; |
+ CLzmaProb isRepG2[kNumStates]; |
+ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; |
+ |
+ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; |
+ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex]; |
+ CLzmaProb posAlignEncoder[1 << kNumAlignBits]; |
+ |
+ CLenPriceEnc lenEnc; |
+ CLenPriceEnc repLenEnc; |
+ |
+ unsigned lclp; |
+ |
+ Bool fastMode; |
+ |
+ CRangeEnc rc; |
+ |
+ Bool writeEndMark; |
+ UInt64 nowPos64; |
+ UInt32 matchPriceCount; |
+ Bool finished; |
+ Bool multiThread; |
+ |
+ SRes result; |
+ UInt32 dictSize; |
+ UInt32 matchFinderCycles; |
+ |
+ int needInit; |
+ |
+ CSaveState saveState; |
+} CLzmaEnc; |
+ |
+void LzmaEnc_SaveState(CLzmaEncHandle pp) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ CSaveState *dest = &p->saveState; |
+ int i; |
+ dest->lenEnc = p->lenEnc; |
+ dest->repLenEnc = p->repLenEnc; |
+ dest->state = p->state; |
+ |
+ for (i = 0; i < kNumStates; i++) |
+ { |
+ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i])); |
+ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i])); |
+ } |
+ for (i = 0; i < kNumLenToPosStates; i++) |
+ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i])); |
+ memcpy(dest->isRep, p->isRep, sizeof(p->isRep)); |
+ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0)); |
+ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1)); |
+ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2)); |
+ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders)); |
+ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder)); |
+ memcpy(dest->reps, p->reps, sizeof(p->reps)); |
+ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb)); |
+} |
+ |
+void LzmaEnc_RestoreState(CLzmaEncHandle pp) |
+{ |
+ CLzmaEnc *dest = (CLzmaEnc *)pp; |
+ const CSaveState *p = &dest->saveState; |
+ int i; |
+ dest->lenEnc = p->lenEnc; |
+ dest->repLenEnc = p->repLenEnc; |
+ dest->state = p->state; |
+ |
+ for (i = 0; i < kNumStates; i++) |
+ { |
+ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i])); |
+ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i])); |
+ } |
+ for (i = 0; i < kNumLenToPosStates; i++) |
+ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i])); |
+ memcpy(dest->isRep, p->isRep, sizeof(p->isRep)); |
+ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0)); |
+ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1)); |
+ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2)); |
+ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders)); |
+ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder)); |
+ memcpy(dest->reps, p->reps, sizeof(p->reps)); |
+ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb)); |
+} |
+ |
+SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ CLzmaEncProps props = *props2; |
+ LzmaEncProps_Normalize(&props); |
+ |
+ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX || |
+ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30)) |
+ return SZ_ERROR_PARAM; |
+ p->dictSize = props.dictSize; |
+ p->matchFinderCycles = props.mc; |
+ { |
+ unsigned fb = props.fb; |
+ if (fb < 5) |
+ fb = 5; |
+ if (fb > LZMA_MATCH_LEN_MAX) |
+ fb = LZMA_MATCH_LEN_MAX; |
+ p->numFastBytes = fb; |
+ } |
+ p->lc = props.lc; |
+ p->lp = props.lp; |
+ p->pb = props.pb; |
+ p->fastMode = (props.algo == 0); |
+ p->matchFinderBase.btMode = props.btMode; |
+ { |
+ UInt32 numHashBytes = 4; |
+ if (props.btMode) |
+ { |
+ if (props.numHashBytes < 2) |
+ numHashBytes = 2; |
+ else if (props.numHashBytes < 4) |
+ numHashBytes = props.numHashBytes; |
+ } |
+ p->matchFinderBase.numHashBytes = numHashBytes; |
+ } |
+ |
+ p->matchFinderBase.cutValue = props.mc; |
+ |
+ p->writeEndMark = props.writeEndMark; |
+ |
+ #ifndef _7ZIP_ST |
+ /* |
+ if (newMultiThread != _multiThread) |
+ { |
+ ReleaseMatchFinder(); |
+ _multiThread = newMultiThread; |
+ } |
+ */ |
+ p->multiThread = (props.numThreads > 1); |
+ #endif |
+ |
+ return SZ_OK; |
+} |
+ |
+static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5}; |
+static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10}; |
+static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11}; |
+static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11}; |
+ |
+#define IsCharState(s) ((s) < 7) |
+ |
+#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1) |
+ |
+#define kInfinityPrice (1 << 30) |
+ |
+static void RangeEnc_Construct(CRangeEnc *p) |
+{ |
+ p->outStream = 0; |
+ p->bufBase = 0; |
+} |
+ |
+#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize) |
+ |
+#define RC_BUF_SIZE (1 << 16) |
+static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc) |
+{ |
+ if (p->bufBase == 0) |
+ { |
+ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE); |
+ if (p->bufBase == 0) |
+ return 0; |
+ p->bufLim = p->bufBase + RC_BUF_SIZE; |
+ } |
+ return 1; |
+} |
+ |
+static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc) |
+{ |
+ alloc->Free(alloc, p->bufBase); |
+ p->bufBase = 0; |
+} |
+ |
+static void RangeEnc_Init(CRangeEnc *p) |
+{ |
+ /* Stream.Init(); */ |
+ p->low = 0; |
+ p->range = 0xFFFFFFFF; |
+ p->cacheSize = 1; |
+ p->cache = 0; |
+ |
+ p->buf = p->bufBase; |
+ |
+ p->processed = 0; |
+ p->res = SZ_OK; |
+} |
+ |
+static void RangeEnc_FlushStream(CRangeEnc *p) |
+{ |
+ size_t num; |
+ if (p->res != SZ_OK) |
+ return; |
+ num = p->buf - p->bufBase; |
+ if (num != p->outStream->Write(p->outStream, p->bufBase, num)) |
+ p->res = SZ_ERROR_WRITE; |
+ p->processed += num; |
+ p->buf = p->bufBase; |
+} |
+ |
+static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p) |
+{ |
+ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0) |
+ { |
+ Byte temp = p->cache; |
+ do |
+ { |
+ Byte *buf = p->buf; |
+ *buf++ = (Byte)(temp + (Byte)(p->low >> 32)); |
+ p->buf = buf; |
+ if (buf == p->bufLim) |
+ RangeEnc_FlushStream(p); |
+ temp = 0xFF; |
+ } |
+ while (--p->cacheSize != 0); |
+ p->cache = (Byte)((UInt32)p->low >> 24); |
+ } |
+ p->cacheSize++; |
+ p->low = (UInt32)p->low << 8; |
+} |
+ |
+static void RangeEnc_FlushData(CRangeEnc *p) |
+{ |
+ int i; |
+ for (i = 0; i < 5; i++) |
+ RangeEnc_ShiftLow(p); |
+} |
+ |
+static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits) |
+{ |
+ do |
+ { |
+ p->range >>= 1; |
+ p->low += p->range & (0 - ((value >> --numBits) & 1)); |
+ if (p->range < kTopValue) |
+ { |
+ p->range <<= 8; |
+ RangeEnc_ShiftLow(p); |
+ } |
+ } |
+ while (numBits != 0); |
+} |
+ |
+static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol) |
+{ |
+ UInt32 ttt = *prob; |
+ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt; |
+ if (symbol == 0) |
+ { |
+ p->range = newBound; |
+ ttt += (kBitModelTotal - ttt) >> kNumMoveBits; |
+ } |
+ else |
+ { |
+ p->low += newBound; |
+ p->range -= newBound; |
+ ttt -= ttt >> kNumMoveBits; |
+ } |
+ *prob = (CLzmaProb)ttt; |
+ if (p->range < kTopValue) |
+ { |
+ p->range <<= 8; |
+ RangeEnc_ShiftLow(p); |
+ } |
+} |
+ |
+static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol) |
+{ |
+ symbol |= 0x100; |
+ do |
+ { |
+ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1); |
+ symbol <<= 1; |
+ } |
+ while (symbol < 0x10000); |
+} |
+ |
+static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte) |
+{ |
+ UInt32 offs = 0x100; |
+ symbol |= 0x100; |
+ do |
+ { |
+ matchByte <<= 1; |
+ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1); |
+ symbol <<= 1; |
+ offs &= ~(matchByte ^ symbol); |
+ } |
+ while (symbol < 0x10000); |
+} |
+ |
+void LzmaEnc_InitPriceTables(UInt32 *ProbPrices) |
+{ |
+ UInt32 i; |
+ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits)) |
+ { |
+ const int kCyclesBits = kNumBitPriceShiftBits; |
+ UInt32 w = i; |
+ UInt32 bitCount = 0; |
+ int j; |
+ for (j = 0; j < kCyclesBits; j++) |
+ { |
+ w = w * w; |
+ bitCount <<= 1; |
+ while (w >= ((UInt32)1 << 16)) |
+ { |
+ w >>= 1; |
+ bitCount++; |
+ } |
+ } |
+ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount); |
+ } |
+} |
+ |
+ |
+#define GET_PRICE(prob, symbol) \ |
+ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]; |
+ |
+#define GET_PRICEa(prob, symbol) \ |
+ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]; |
+ |
+#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits] |
+#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] |
+ |
+#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits] |
+#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] |
+ |
+static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices) |
+{ |
+ UInt32 price = 0; |
+ symbol |= 0x100; |
+ do |
+ { |
+ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1); |
+ symbol <<= 1; |
+ } |
+ while (symbol < 0x10000); |
+ return price; |
+} |
+ |
+static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices) |
+{ |
+ UInt32 price = 0; |
+ UInt32 offs = 0x100; |
+ symbol |= 0x100; |
+ do |
+ { |
+ matchByte <<= 1; |
+ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1); |
+ symbol <<= 1; |
+ offs &= ~(matchByte ^ symbol); |
+ } |
+ while (symbol < 0x10000); |
+ return price; |
+} |
+ |
+ |
+static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol) |
+{ |
+ UInt32 m = 1; |
+ int i; |
+ for (i = numBitLevels; i != 0;) |
+ { |
+ UInt32 bit; |
+ i--; |
+ bit = (symbol >> i) & 1; |
+ RangeEnc_EncodeBit(rc, probs + m, bit); |
+ m = (m << 1) | bit; |
+ } |
+} |
+ |
+static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol) |
+{ |
+ UInt32 m = 1; |
+ int i; |
+ for (i = 0; i < numBitLevels; i++) |
+ { |
+ UInt32 bit = symbol & 1; |
+ RangeEnc_EncodeBit(rc, probs + m, bit); |
+ m = (m << 1) | bit; |
+ symbol >>= 1; |
+ } |
+} |
+ |
+static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices) |
+{ |
+ UInt32 price = 0; |
+ symbol |= (1 << numBitLevels); |
+ while (symbol != 1) |
+ { |
+ price += GET_PRICEa(probs[symbol >> 1], symbol & 1); |
+ symbol >>= 1; |
+ } |
+ return price; |
+} |
+ |
+static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices) |
+{ |
+ UInt32 price = 0; |
+ UInt32 m = 1; |
+ int i; |
+ for (i = numBitLevels; i != 0; i--) |
+ { |
+ UInt32 bit = symbol & 1; |
+ symbol >>= 1; |
+ price += GET_PRICEa(probs[m], bit); |
+ m = (m << 1) | bit; |
+ } |
+ return price; |
+} |
+ |
+ |
+static void LenEnc_Init(CLenEnc *p) |
+{ |
+ unsigned i; |
+ p->choice = p->choice2 = kProbInitValue; |
+ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++) |
+ p->low[i] = kProbInitValue; |
+ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++) |
+ p->mid[i] = kProbInitValue; |
+ for (i = 0; i < kLenNumHighSymbols; i++) |
+ p->high[i] = kProbInitValue; |
+} |
+ |
+static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState) |
+{ |
+ if (symbol < kLenNumLowSymbols) |
+ { |
+ RangeEnc_EncodeBit(rc, &p->choice, 0); |
+ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol); |
+ } |
+ else |
+ { |
+ RangeEnc_EncodeBit(rc, &p->choice, 1); |
+ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols) |
+ { |
+ RangeEnc_EncodeBit(rc, &p->choice2, 0); |
+ RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols); |
+ } |
+ else |
+ { |
+ RangeEnc_EncodeBit(rc, &p->choice2, 1); |
+ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols); |
+ } |
+ } |
+} |
+ |
+static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices) |
+{ |
+ UInt32 a0 = GET_PRICE_0a(p->choice); |
+ UInt32 a1 = GET_PRICE_1a(p->choice); |
+ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2); |
+ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2); |
+ UInt32 i = 0; |
+ for (i = 0; i < kLenNumLowSymbols; i++) |
+ { |
+ if (i >= numSymbols) |
+ return; |
+ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices); |
+ } |
+ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++) |
+ { |
+ if (i >= numSymbols) |
+ return; |
+ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices); |
+ } |
+ for (; i < numSymbols; i++) |
+ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices); |
+} |
+ |
+static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices) |
+{ |
+ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices); |
+ p->counters[posState] = p->tableSize; |
+} |
+ |
+static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices) |
+{ |
+ UInt32 posState; |
+ for (posState = 0; posState < numPosStates; posState++) |
+ LenPriceEnc_UpdateTable(p, posState, ProbPrices); |
+} |
+ |
+static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices) |
+{ |
+ LenEnc_Encode(&p->p, rc, symbol, posState); |
+ if (updatePrice) |
+ if (--p->counters[posState] == 0) |
+ LenPriceEnc_UpdateTable(p, posState, ProbPrices); |
+} |
+ |
+ |
+ |
+ |
+static void MovePos(CLzmaEnc *p, UInt32 num) |
+{ |
+ #ifdef SHOW_STAT |
+ ttt += num; |
+ printf("\n MovePos %d", num); |
+ #endif |
+ if (num != 0) |
+ { |
+ p->additionalOffset += num; |
+ p->matchFinder.Skip(p->matchFinderObj, num); |
+ } |
+} |
+ |
+static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes) |
+{ |
+ UInt32 lenRes = 0, numPairs; |
+ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); |
+ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches); |
+ #ifdef SHOW_STAT |
+ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2); |
+ ttt++; |
+ { |
+ UInt32 i; |
+ for (i = 0; i < numPairs; i += 2) |
+ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]); |
+ } |
+ #endif |
+ if (numPairs > 0) |
+ { |
+ lenRes = p->matches[numPairs - 2]; |
+ if (lenRes == p->numFastBytes) |
+ { |
+ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; |
+ UInt32 distance = p->matches[numPairs - 1] + 1; |
+ UInt32 numAvail = p->numAvail; |
+ if (numAvail > LZMA_MATCH_LEN_MAX) |
+ numAvail = LZMA_MATCH_LEN_MAX; |
+ { |
+ const Byte *pby2 = pby - distance; |
+ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++); |
+ } |
+ } |
+ } |
+ p->additionalOffset++; |
+ *numDistancePairsRes = numPairs; |
+ return lenRes; |
+} |
+ |
+ |
+#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False; |
+#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False; |
+#define IsShortRep(p) ((p)->backPrev == 0) |
+ |
+static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState) |
+{ |
+ return |
+ GET_PRICE_0(p->isRepG0[state]) + |
+ GET_PRICE_0(p->isRep0Long[state][posState]); |
+} |
+ |
+static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState) |
+{ |
+ UInt32 price; |
+ if (repIndex == 0) |
+ { |
+ price = GET_PRICE_0(p->isRepG0[state]); |
+ price += GET_PRICE_1(p->isRep0Long[state][posState]); |
+ } |
+ else |
+ { |
+ price = GET_PRICE_1(p->isRepG0[state]); |
+ if (repIndex == 1) |
+ price += GET_PRICE_0(p->isRepG1[state]); |
+ else |
+ { |
+ price += GET_PRICE_1(p->isRepG1[state]); |
+ price += GET_PRICE(p->isRepG2[state], repIndex - 2); |
+ } |
+ } |
+ return price; |
+} |
+ |
+static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState) |
+{ |
+ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] + |
+ GetPureRepPrice(p, repIndex, state, posState); |
+} |
+ |
+static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur) |
+{ |
+ UInt32 posMem = p->opt[cur].posPrev; |
+ UInt32 backMem = p->opt[cur].backPrev; |
+ p->optimumEndIndex = cur; |
+ do |
+ { |
+ if (p->opt[cur].prev1IsChar) |
+ { |
+ MakeAsChar(&p->opt[posMem]) |
+ p->opt[posMem].posPrev = posMem - 1; |
+ if (p->opt[cur].prev2) |
+ { |
+ p->opt[posMem - 1].prev1IsChar = False; |
+ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2; |
+ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2; |
+ } |
+ } |
+ { |
+ UInt32 posPrev = posMem; |
+ UInt32 backCur = backMem; |
+ |
+ backMem = p->opt[posPrev].backPrev; |
+ posMem = p->opt[posPrev].posPrev; |
+ |
+ p->opt[posPrev].backPrev = backCur; |
+ p->opt[posPrev].posPrev = cur; |
+ cur = posPrev; |
+ } |
+ } |
+ while (cur != 0); |
+ *backRes = p->opt[0].backPrev; |
+ p->optimumCurrentIndex = p->opt[0].posPrev; |
+ return p->optimumCurrentIndex; |
+} |
+ |
+#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300) |
+ |
+static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes) |
+{ |
+ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur; |
+ UInt32 matchPrice, repMatchPrice, normalMatchPrice; |
+ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS]; |
+ UInt32 *matches; |
+ const Byte *data; |
+ Byte curByte, matchByte; |
+ if (p->optimumEndIndex != p->optimumCurrentIndex) |
+ { |
+ const COptimal *opt = &p->opt[p->optimumCurrentIndex]; |
+ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex; |
+ *backRes = opt->backPrev; |
+ p->optimumCurrentIndex = opt->posPrev; |
+ return lenRes; |
+ } |
+ p->optimumCurrentIndex = p->optimumEndIndex = 0; |
+ |
+ if (p->additionalOffset == 0) |
+ mainLen = ReadMatchDistances(p, &numPairs); |
+ else |
+ { |
+ mainLen = p->longestMatchLength; |
+ numPairs = p->numPairs; |
+ } |
+ |
+ numAvail = p->numAvail; |
+ if (numAvail < 2) |
+ { |
+ *backRes = (UInt32)(-1); |
+ return 1; |
+ } |
+ if (numAvail > LZMA_MATCH_LEN_MAX) |
+ numAvail = LZMA_MATCH_LEN_MAX; |
+ |
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; |
+ repMaxIndex = 0; |
+ for (i = 0; i < LZMA_NUM_REPS; i++) |
+ { |
+ UInt32 lenTest; |
+ const Byte *data2; |
+ reps[i] = p->reps[i]; |
+ data2 = data - (reps[i] + 1); |
+ if (data[0] != data2[0] || data[1] != data2[1]) |
+ { |
+ repLens[i] = 0; |
+ continue; |
+ } |
+ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++); |
+ repLens[i] = lenTest; |
+ if (lenTest > repLens[repMaxIndex]) |
+ repMaxIndex = i; |
+ } |
+ if (repLens[repMaxIndex] >= p->numFastBytes) |
+ { |
+ UInt32 lenRes; |
+ *backRes = repMaxIndex; |
+ lenRes = repLens[repMaxIndex]; |
+ MovePos(p, lenRes - 1); |
+ return lenRes; |
+ } |
+ |
+ matches = p->matches; |
+ if (mainLen >= p->numFastBytes) |
+ { |
+ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS; |
+ MovePos(p, mainLen - 1); |
+ return mainLen; |
+ } |
+ curByte = *data; |
+ matchByte = *(data - (reps[0] + 1)); |
+ |
+ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2) |
+ { |
+ *backRes = (UInt32)-1; |
+ return 1; |
+ } |
+ |
+ p->opt[0].state = (CState)p->state; |
+ |
+ posState = (position & p->pbMask); |
+ |
+ { |
+ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); |
+ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) + |
+ (!IsCharState(p->state) ? |
+ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) : |
+ LitEnc_GetPrice(probs, curByte, p->ProbPrices)); |
+ } |
+ |
+ MakeAsChar(&p->opt[1]); |
+ |
+ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]); |
+ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]); |
+ |
+ if (matchByte == curByte) |
+ { |
+ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState); |
+ if (shortRepPrice < p->opt[1].price) |
+ { |
+ p->opt[1].price = shortRepPrice; |
+ MakeAsShortRep(&p->opt[1]); |
+ } |
+ } |
+ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]); |
+ |
+ if (lenEnd < 2) |
+ { |
+ *backRes = p->opt[1].backPrev; |
+ return 1; |
+ } |
+ |
+ p->opt[1].posPrev = 0; |
+ for (i = 0; i < LZMA_NUM_REPS; i++) |
+ p->opt[0].backs[i] = reps[i]; |
+ |
+ len = lenEnd; |
+ do |
+ p->opt[len--].price = kInfinityPrice; |
+ while (len >= 2); |
+ |
+ for (i = 0; i < LZMA_NUM_REPS; i++) |
+ { |
+ UInt32 repLen = repLens[i]; |
+ UInt32 price; |
+ if (repLen < 2) |
+ continue; |
+ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState); |
+ do |
+ { |
+ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2]; |
+ COptimal *opt = &p->opt[repLen]; |
+ if (curAndLenPrice < opt->price) |
+ { |
+ opt->price = curAndLenPrice; |
+ opt->posPrev = 0; |
+ opt->backPrev = i; |
+ opt->prev1IsChar = False; |
+ } |
+ } |
+ while (--repLen >= 2); |
+ } |
+ |
+ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]); |
+ |
+ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2); |
+ if (len <= mainLen) |
+ { |
+ UInt32 offs = 0; |
+ while (len > matches[offs]) |
+ offs += 2; |
+ for (; ; len++) |
+ { |
+ COptimal *opt; |
+ UInt32 distance = matches[offs + 1]; |
+ |
+ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN]; |
+ UInt32 lenToPosState = GetLenToPosState(len); |
+ if (distance < kNumFullDistances) |
+ curAndLenPrice += p->distancesPrices[lenToPosState][distance]; |
+ else |
+ { |
+ UInt32 slot; |
+ GetPosSlot2(distance, slot); |
+ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot]; |
+ } |
+ opt = &p->opt[len]; |
+ if (curAndLenPrice < opt->price) |
+ { |
+ opt->price = curAndLenPrice; |
+ opt->posPrev = 0; |
+ opt->backPrev = distance + LZMA_NUM_REPS; |
+ opt->prev1IsChar = False; |
+ } |
+ if (len == matches[offs]) |
+ { |
+ offs += 2; |
+ if (offs == numPairs) |
+ break; |
+ } |
+ } |
+ } |
+ |
+ cur = 0; |
+ |
+ #ifdef SHOW_STAT2 |
+ if (position >= 0) |
+ { |
+ unsigned i; |
+ printf("\n pos = %4X", position); |
+ for (i = cur; i <= lenEnd; i++) |
+ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price); |
+ } |
+ #endif |
+ |
+ for (;;) |
+ { |
+ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen; |
+ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice; |
+ Bool nextIsChar; |
+ Byte curByte, matchByte; |
+ const Byte *data; |
+ COptimal *curOpt; |
+ COptimal *nextOpt; |
+ |
+ cur++; |
+ if (cur == lenEnd) |
+ return Backward(p, backRes, cur); |
+ |
+ newLen = ReadMatchDistances(p, &numPairs); |
+ if (newLen >= p->numFastBytes) |
+ { |
+ p->numPairs = numPairs; |
+ p->longestMatchLength = newLen; |
+ return Backward(p, backRes, cur); |
+ } |
+ position++; |
+ curOpt = &p->opt[cur]; |
+ posPrev = curOpt->posPrev; |
+ if (curOpt->prev1IsChar) |
+ { |
+ posPrev--; |
+ if (curOpt->prev2) |
+ { |
+ state = p->opt[curOpt->posPrev2].state; |
+ if (curOpt->backPrev2 < LZMA_NUM_REPS) |
+ state = kRepNextStates[state]; |
+ else |
+ state = kMatchNextStates[state]; |
+ } |
+ else |
+ state = p->opt[posPrev].state; |
+ state = kLiteralNextStates[state]; |
+ } |
+ else |
+ state = p->opt[posPrev].state; |
+ if (posPrev == cur - 1) |
+ { |
+ if (IsShortRep(curOpt)) |
+ state = kShortRepNextStates[state]; |
+ else |
+ state = kLiteralNextStates[state]; |
+ } |
+ else |
+ { |
+ UInt32 pos; |
+ const COptimal *prevOpt; |
+ if (curOpt->prev1IsChar && curOpt->prev2) |
+ { |
+ posPrev = curOpt->posPrev2; |
+ pos = curOpt->backPrev2; |
+ state = kRepNextStates[state]; |
+ } |
+ else |
+ { |
+ pos = curOpt->backPrev; |
+ if (pos < LZMA_NUM_REPS) |
+ state = kRepNextStates[state]; |
+ else |
+ state = kMatchNextStates[state]; |
+ } |
+ prevOpt = &p->opt[posPrev]; |
+ if (pos < LZMA_NUM_REPS) |
+ { |
+ UInt32 i; |
+ reps[0] = prevOpt->backs[pos]; |
+ for (i = 1; i <= pos; i++) |
+ reps[i] = prevOpt->backs[i - 1]; |
+ for (; i < LZMA_NUM_REPS; i++) |
+ reps[i] = prevOpt->backs[i]; |
+ } |
+ else |
+ { |
+ UInt32 i; |
+ reps[0] = (pos - LZMA_NUM_REPS); |
+ for (i = 1; i < LZMA_NUM_REPS; i++) |
+ reps[i] = prevOpt->backs[i - 1]; |
+ } |
+ } |
+ curOpt->state = (CState)state; |
+ |
+ curOpt->backs[0] = reps[0]; |
+ curOpt->backs[1] = reps[1]; |
+ curOpt->backs[2] = reps[2]; |
+ curOpt->backs[3] = reps[3]; |
+ |
+ curPrice = curOpt->price; |
+ nextIsChar = False; |
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; |
+ curByte = *data; |
+ matchByte = *(data - (reps[0] + 1)); |
+ |
+ posState = (position & p->pbMask); |
+ |
+ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]); |
+ { |
+ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); |
+ curAnd1Price += |
+ (!IsCharState(state) ? |
+ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) : |
+ LitEnc_GetPrice(probs, curByte, p->ProbPrices)); |
+ } |
+ |
+ nextOpt = &p->opt[cur + 1]; |
+ |
+ if (curAnd1Price < nextOpt->price) |
+ { |
+ nextOpt->price = curAnd1Price; |
+ nextOpt->posPrev = cur; |
+ MakeAsChar(nextOpt); |
+ nextIsChar = True; |
+ } |
+ |
+ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]); |
+ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]); |
+ |
+ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0)) |
+ { |
+ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState); |
+ if (shortRepPrice <= nextOpt->price) |
+ { |
+ nextOpt->price = shortRepPrice; |
+ nextOpt->posPrev = cur; |
+ MakeAsShortRep(nextOpt); |
+ nextIsChar = True; |
+ } |
+ } |
+ numAvailFull = p->numAvail; |
+ { |
+ UInt32 temp = kNumOpts - 1 - cur; |
+ if (temp < numAvailFull) |
+ numAvailFull = temp; |
+ } |
+ |
+ if (numAvailFull < 2) |
+ continue; |
+ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes); |
+ |
+ if (!nextIsChar && matchByte != curByte) /* speed optimization */ |
+ { |
+ /* try Literal + rep0 */ |
+ UInt32 temp; |
+ UInt32 lenTest2; |
+ const Byte *data2 = data - (reps[0] + 1); |
+ UInt32 limit = p->numFastBytes + 1; |
+ if (limit > numAvailFull) |
+ limit = numAvailFull; |
+ |
+ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++); |
+ lenTest2 = temp - 1; |
+ if (lenTest2 >= 2) |
+ { |
+ UInt32 state2 = kLiteralNextStates[state]; |
+ UInt32 posStateNext = (position + 1) & p->pbMask; |
+ UInt32 nextRepMatchPrice = curAnd1Price + |
+ GET_PRICE_1(p->isMatch[state2][posStateNext]) + |
+ GET_PRICE_1(p->isRep[state2]); |
+ /* for (; lenTest2 >= 2; lenTest2--) */ |
+ { |
+ UInt32 curAndLenPrice; |
+ COptimal *opt; |
+ UInt32 offset = cur + 1 + lenTest2; |
+ while (lenEnd < offset) |
+ p->opt[++lenEnd].price = kInfinityPrice; |
+ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); |
+ opt = &p->opt[offset]; |
+ if (curAndLenPrice < opt->price) |
+ { |
+ opt->price = curAndLenPrice; |
+ opt->posPrev = cur + 1; |
+ opt->backPrev = 0; |
+ opt->prev1IsChar = True; |
+ opt->prev2 = False; |
+ } |
+ } |
+ } |
+ } |
+ |
+ startLen = 2; /* speed optimization */ |
+ { |
+ UInt32 repIndex; |
+ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++) |
+ { |
+ UInt32 lenTest; |
+ UInt32 lenTestTemp; |
+ UInt32 price; |
+ const Byte *data2 = data - (reps[repIndex] + 1); |
+ if (data[0] != data2[0] || data[1] != data2[1]) |
+ continue; |
+ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++); |
+ while (lenEnd < cur + lenTest) |
+ p->opt[++lenEnd].price = kInfinityPrice; |
+ lenTestTemp = lenTest; |
+ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState); |
+ do |
+ { |
+ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2]; |
+ COptimal *opt = &p->opt[cur + lenTest]; |
+ if (curAndLenPrice < opt->price) |
+ { |
+ opt->price = curAndLenPrice; |
+ opt->posPrev = cur; |
+ opt->backPrev = repIndex; |
+ opt->prev1IsChar = False; |
+ } |
+ } |
+ while (--lenTest >= 2); |
+ lenTest = lenTestTemp; |
+ |
+ if (repIndex == 0) |
+ startLen = lenTest + 1; |
+ |
+ /* if (_maxMode) */ |
+ { |
+ UInt32 lenTest2 = lenTest + 1; |
+ UInt32 limit = lenTest2 + p->numFastBytes; |
+ UInt32 nextRepMatchPrice; |
+ if (limit > numAvailFull) |
+ limit = numAvailFull; |
+ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++); |
+ lenTest2 -= lenTest + 1; |
+ if (lenTest2 >= 2) |
+ { |
+ UInt32 state2 = kRepNextStates[state]; |
+ UInt32 posStateNext = (position + lenTest) & p->pbMask; |
+ UInt32 curAndLenCharPrice = |
+ price + p->repLenEnc.prices[posState][lenTest - 2] + |
+ GET_PRICE_0(p->isMatch[state2][posStateNext]) + |
+ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]), |
+ data[lenTest], data2[lenTest], p->ProbPrices); |
+ state2 = kLiteralNextStates[state2]; |
+ posStateNext = (position + lenTest + 1) & p->pbMask; |
+ nextRepMatchPrice = curAndLenCharPrice + |
+ GET_PRICE_1(p->isMatch[state2][posStateNext]) + |
+ GET_PRICE_1(p->isRep[state2]); |
+ |
+ /* for (; lenTest2 >= 2; lenTest2--) */ |
+ { |
+ UInt32 curAndLenPrice; |
+ COptimal *opt; |
+ UInt32 offset = cur + lenTest + 1 + lenTest2; |
+ while (lenEnd < offset) |
+ p->opt[++lenEnd].price = kInfinityPrice; |
+ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); |
+ opt = &p->opt[offset]; |
+ if (curAndLenPrice < opt->price) |
+ { |
+ opt->price = curAndLenPrice; |
+ opt->posPrev = cur + lenTest + 1; |
+ opt->backPrev = 0; |
+ opt->prev1IsChar = True; |
+ opt->prev2 = True; |
+ opt->posPrev2 = cur; |
+ opt->backPrev2 = repIndex; |
+ } |
+ } |
+ } |
+ } |
+ } |
+ } |
+ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */ |
+ if (newLen > numAvail) |
+ { |
+ newLen = numAvail; |
+ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2); |
+ matches[numPairs] = newLen; |
+ numPairs += 2; |
+ } |
+ if (newLen >= startLen) |
+ { |
+ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]); |
+ UInt32 offs, curBack, posSlot; |
+ UInt32 lenTest; |
+ while (lenEnd < cur + newLen) |
+ p->opt[++lenEnd].price = kInfinityPrice; |
+ |
+ offs = 0; |
+ while (startLen > matches[offs]) |
+ offs += 2; |
+ curBack = matches[offs + 1]; |
+ GetPosSlot2(curBack, posSlot); |
+ for (lenTest = /*2*/ startLen; ; lenTest++) |
+ { |
+ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN]; |
+ UInt32 lenToPosState = GetLenToPosState(lenTest); |
+ COptimal *opt; |
+ if (curBack < kNumFullDistances) |
+ curAndLenPrice += p->distancesPrices[lenToPosState][curBack]; |
+ else |
+ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask]; |
+ |
+ opt = &p->opt[cur + lenTest]; |
+ if (curAndLenPrice < opt->price) |
+ { |
+ opt->price = curAndLenPrice; |
+ opt->posPrev = cur; |
+ opt->backPrev = curBack + LZMA_NUM_REPS; |
+ opt->prev1IsChar = False; |
+ } |
+ |
+ if (/*_maxMode && */lenTest == matches[offs]) |
+ { |
+ /* Try Match + Literal + Rep0 */ |
+ const Byte *data2 = data - (curBack + 1); |
+ UInt32 lenTest2 = lenTest + 1; |
+ UInt32 limit = lenTest2 + p->numFastBytes; |
+ UInt32 nextRepMatchPrice; |
+ if (limit > numAvailFull) |
+ limit = numAvailFull; |
+ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++); |
+ lenTest2 -= lenTest + 1; |
+ if (lenTest2 >= 2) |
+ { |
+ UInt32 state2 = kMatchNextStates[state]; |
+ UInt32 posStateNext = (position + lenTest) & p->pbMask; |
+ UInt32 curAndLenCharPrice = curAndLenPrice + |
+ GET_PRICE_0(p->isMatch[state2][posStateNext]) + |
+ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]), |
+ data[lenTest], data2[lenTest], p->ProbPrices); |
+ state2 = kLiteralNextStates[state2]; |
+ posStateNext = (posStateNext + 1) & p->pbMask; |
+ nextRepMatchPrice = curAndLenCharPrice + |
+ GET_PRICE_1(p->isMatch[state2][posStateNext]) + |
+ GET_PRICE_1(p->isRep[state2]); |
+ |
+ /* for (; lenTest2 >= 2; lenTest2--) */ |
+ { |
+ UInt32 offset = cur + lenTest + 1 + lenTest2; |
+ UInt32 curAndLenPrice; |
+ COptimal *opt; |
+ while (lenEnd < offset) |
+ p->opt[++lenEnd].price = kInfinityPrice; |
+ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); |
+ opt = &p->opt[offset]; |
+ if (curAndLenPrice < opt->price) |
+ { |
+ opt->price = curAndLenPrice; |
+ opt->posPrev = cur + lenTest + 1; |
+ opt->backPrev = 0; |
+ opt->prev1IsChar = True; |
+ opt->prev2 = True; |
+ opt->posPrev2 = cur; |
+ opt->backPrev2 = curBack + LZMA_NUM_REPS; |
+ } |
+ } |
+ } |
+ offs += 2; |
+ if (offs == numPairs) |
+ break; |
+ curBack = matches[offs + 1]; |
+ if (curBack >= kNumFullDistances) |
+ GetPosSlot2(curBack, posSlot); |
+ } |
+ } |
+ } |
+ } |
+} |
+ |
+#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist)) |
+ |
+static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes) |
+{ |
+ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i; |
+ const Byte *data; |
+ const UInt32 *matches; |
+ |
+ if (p->additionalOffset == 0) |
+ mainLen = ReadMatchDistances(p, &numPairs); |
+ else |
+ { |
+ mainLen = p->longestMatchLength; |
+ numPairs = p->numPairs; |
+ } |
+ |
+ numAvail = p->numAvail; |
+ *backRes = (UInt32)-1; |
+ if (numAvail < 2) |
+ return 1; |
+ if (numAvail > LZMA_MATCH_LEN_MAX) |
+ numAvail = LZMA_MATCH_LEN_MAX; |
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; |
+ |
+ repLen = repIndex = 0; |
+ for (i = 0; i < LZMA_NUM_REPS; i++) |
+ { |
+ UInt32 len; |
+ const Byte *data2 = data - (p->reps[i] + 1); |
+ if (data[0] != data2[0] || data[1] != data2[1]) |
+ continue; |
+ for (len = 2; len < numAvail && data[len] == data2[len]; len++); |
+ if (len >= p->numFastBytes) |
+ { |
+ *backRes = i; |
+ MovePos(p, len - 1); |
+ return len; |
+ } |
+ if (len > repLen) |
+ { |
+ repIndex = i; |
+ repLen = len; |
+ } |
+ } |
+ |
+ matches = p->matches; |
+ if (mainLen >= p->numFastBytes) |
+ { |
+ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS; |
+ MovePos(p, mainLen - 1); |
+ return mainLen; |
+ } |
+ |
+ mainDist = 0; /* for GCC */ |
+ if (mainLen >= 2) |
+ { |
+ mainDist = matches[numPairs - 1]; |
+ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1) |
+ { |
+ if (!ChangePair(matches[numPairs - 3], mainDist)) |
+ break; |
+ numPairs -= 2; |
+ mainLen = matches[numPairs - 2]; |
+ mainDist = matches[numPairs - 1]; |
+ } |
+ if (mainLen == 2 && mainDist >= 0x80) |
+ mainLen = 1; |
+ } |
+ |
+ if (repLen >= 2 && ( |
+ (repLen + 1 >= mainLen) || |
+ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) || |
+ (repLen + 3 >= mainLen && mainDist >= (1 << 15)))) |
+ { |
+ *backRes = repIndex; |
+ MovePos(p, repLen - 1); |
+ return repLen; |
+ } |
+ |
+ if (mainLen < 2 || numAvail <= 2) |
+ return 1; |
+ |
+ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs); |
+ if (p->longestMatchLength >= 2) |
+ { |
+ UInt32 newDistance = matches[p->numPairs - 1]; |
+ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) || |
+ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) || |
+ (p->longestMatchLength > mainLen + 1) || |
+ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist))) |
+ return 1; |
+ } |
+ |
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; |
+ for (i = 0; i < LZMA_NUM_REPS; i++) |
+ { |
+ UInt32 len, limit; |
+ const Byte *data2 = data - (p->reps[i] + 1); |
+ if (data[0] != data2[0] || data[1] != data2[1]) |
+ continue; |
+ limit = mainLen - 1; |
+ for (len = 2; len < limit && data[len] == data2[len]; len++); |
+ if (len >= limit) |
+ return 1; |
+ } |
+ *backRes = mainDist + LZMA_NUM_REPS; |
+ MovePos(p, mainLen - 2); |
+ return mainLen; |
+} |
+ |
+static void WriteEndMarker(CLzmaEnc *p, UInt32 posState) |
+{ |
+ UInt32 len; |
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1); |
+ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0); |
+ p->state = kMatchNextStates[p->state]; |
+ len = LZMA_MATCH_LEN_MIN; |
+ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); |
+ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1); |
+ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits); |
+ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask); |
+} |
+ |
+static SRes CheckErrors(CLzmaEnc *p) |
+{ |
+ if (p->result != SZ_OK) |
+ return p->result; |
+ if (p->rc.res != SZ_OK) |
+ p->result = SZ_ERROR_WRITE; |
+ if (p->matchFinderBase.result != SZ_OK) |
+ p->result = SZ_ERROR_READ; |
+ if (p->result != SZ_OK) |
+ p->finished = True; |
+ return p->result; |
+} |
+ |
+static SRes Flush(CLzmaEnc *p, UInt32 nowPos) |
+{ |
+ /* ReleaseMFStream(); */ |
+ p->finished = True; |
+ if (p->writeEndMark) |
+ WriteEndMarker(p, nowPos & p->pbMask); |
+ RangeEnc_FlushData(&p->rc); |
+ RangeEnc_FlushStream(&p->rc); |
+ return CheckErrors(p); |
+} |
+ |
+static void FillAlignPrices(CLzmaEnc *p) |
+{ |
+ UInt32 i; |
+ for (i = 0; i < kAlignTableSize; i++) |
+ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices); |
+ p->alignPriceCount = 0; |
+} |
+ |
+static void FillDistancesPrices(CLzmaEnc *p) |
+{ |
+ UInt32 tempPrices[kNumFullDistances]; |
+ UInt32 i, lenToPosState; |
+ for (i = kStartPosModelIndex; i < kNumFullDistances; i++) |
+ { |
+ UInt32 posSlot = GetPosSlot1(i); |
+ UInt32 footerBits = ((posSlot >> 1) - 1); |
+ UInt32 base = ((2 | (posSlot & 1)) << footerBits); |
+ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices); |
+ } |
+ |
+ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++) |
+ { |
+ UInt32 posSlot; |
+ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState]; |
+ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState]; |
+ for (posSlot = 0; posSlot < p->distTableSize; posSlot++) |
+ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices); |
+ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++) |
+ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits); |
+ |
+ { |
+ UInt32 *distancesPrices = p->distancesPrices[lenToPosState]; |
+ UInt32 i; |
+ for (i = 0; i < kStartPosModelIndex; i++) |
+ distancesPrices[i] = posSlotPrices[i]; |
+ for (; i < kNumFullDistances; i++) |
+ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i]; |
+ } |
+ } |
+ p->matchPriceCount = 0; |
+} |
+ |
+void LzmaEnc_Construct(CLzmaEnc *p) |
+{ |
+ RangeEnc_Construct(&p->rc); |
+ MatchFinder_Construct(&p->matchFinderBase); |
+ #ifndef _7ZIP_ST |
+ MatchFinderMt_Construct(&p->matchFinderMt); |
+ p->matchFinderMt.MatchFinder = &p->matchFinderBase; |
+ #endif |
+ |
+ { |
+ CLzmaEncProps props; |
+ LzmaEncProps_Init(&props); |
+ LzmaEnc_SetProps(p, &props); |
+ } |
+ |
+ #ifndef LZMA_LOG_BSR |
+ LzmaEnc_FastPosInit(p->g_FastPos); |
+ #endif |
+ |
+ LzmaEnc_InitPriceTables(p->ProbPrices); |
+ p->litProbs = 0; |
+ p->saveState.litProbs = 0; |
+} |
+ |
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc) |
+{ |
+ void *p; |
+ p = alloc->Alloc(alloc, sizeof(CLzmaEnc)); |
+ if (p != 0) |
+ LzmaEnc_Construct((CLzmaEnc *)p); |
+ return p; |
+} |
+ |
+void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc) |
+{ |
+ alloc->Free(alloc, p->litProbs); |
+ alloc->Free(alloc, p->saveState.litProbs); |
+ p->litProbs = 0; |
+ p->saveState.litProbs = 0; |
+} |
+ |
+void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ #ifndef _7ZIP_ST |
+ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig); |
+ #endif |
+ MatchFinder_Free(&p->matchFinderBase, allocBig); |
+ LzmaEnc_FreeLits(p, alloc); |
+ RangeEnc_Free(&p->rc, alloc); |
+} |
+ |
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig); |
+ alloc->Free(alloc, p); |
+} |
+ |
+static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize) |
+{ |
+ UInt32 nowPos32, startPos32; |
+ if (p->needInit) |
+ { |
+ p->matchFinder.Init(p->matchFinderObj); |
+ p->needInit = 0; |
+ } |
+ |
+ if (p->finished) |
+ return p->result; |
+ RINOK(CheckErrors(p)); |
+ |
+ nowPos32 = (UInt32)p->nowPos64; |
+ startPos32 = nowPos32; |
+ |
+ if (p->nowPos64 == 0) |
+ { |
+ UInt32 numPairs; |
+ Byte curByte; |
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) |
+ return Flush(p, nowPos32); |
+ ReadMatchDistances(p, &numPairs); |
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0); |
+ p->state = kLiteralNextStates[p->state]; |
+ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset); |
+ LitEnc_Encode(&p->rc, p->litProbs, curByte); |
+ p->additionalOffset--; |
+ nowPos32++; |
+ } |
+ |
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0) |
+ for (;;) |
+ { |
+ UInt32 pos, len, posState; |
+ |
+ if (p->fastMode) |
+ len = GetOptimumFast(p, &pos); |
+ else |
+ len = GetOptimum(p, nowPos32, &pos); |
+ |
+ #ifdef SHOW_STAT2 |
+ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos); |
+ #endif |
+ |
+ posState = nowPos32 & p->pbMask; |
+ if (len == 1 && pos == (UInt32)-1) |
+ { |
+ Byte curByte; |
+ CLzmaProb *probs; |
+ const Byte *data; |
+ |
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0); |
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; |
+ curByte = *data; |
+ probs = LIT_PROBS(nowPos32, *(data - 1)); |
+ if (IsCharState(p->state)) |
+ LitEnc_Encode(&p->rc, probs, curByte); |
+ else |
+ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1)); |
+ p->state = kLiteralNextStates[p->state]; |
+ } |
+ else |
+ { |
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1); |
+ if (pos < LZMA_NUM_REPS) |
+ { |
+ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1); |
+ if (pos == 0) |
+ { |
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0); |
+ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1)); |
+ } |
+ else |
+ { |
+ UInt32 distance = p->reps[pos]; |
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1); |
+ if (pos == 1) |
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0); |
+ else |
+ { |
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1); |
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2); |
+ if (pos == 3) |
+ p->reps[3] = p->reps[2]; |
+ p->reps[2] = p->reps[1]; |
+ } |
+ p->reps[1] = p->reps[0]; |
+ p->reps[0] = distance; |
+ } |
+ if (len == 1) |
+ p->state = kShortRepNextStates[p->state]; |
+ else |
+ { |
+ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); |
+ p->state = kRepNextStates[p->state]; |
+ } |
+ } |
+ else |
+ { |
+ UInt32 posSlot; |
+ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0); |
+ p->state = kMatchNextStates[p->state]; |
+ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); |
+ pos -= LZMA_NUM_REPS; |
+ GetPosSlot(pos, posSlot); |
+ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot); |
+ |
+ if (posSlot >= kStartPosModelIndex) |
+ { |
+ UInt32 footerBits = ((posSlot >> 1) - 1); |
+ UInt32 base = ((2 | (posSlot & 1)) << footerBits); |
+ UInt32 posReduced = pos - base; |
+ |
+ if (posSlot < kEndPosModelIndex) |
+ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced); |
+ else |
+ { |
+ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits); |
+ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask); |
+ p->alignPriceCount++; |
+ } |
+ } |
+ p->reps[3] = p->reps[2]; |
+ p->reps[2] = p->reps[1]; |
+ p->reps[1] = p->reps[0]; |
+ p->reps[0] = pos; |
+ p->matchPriceCount++; |
+ } |
+ } |
+ p->additionalOffset -= len; |
+ nowPos32 += len; |
+ if (p->additionalOffset == 0) |
+ { |
+ UInt32 processed; |
+ if (!p->fastMode) |
+ { |
+ if (p->matchPriceCount >= (1 << 7)) |
+ FillDistancesPrices(p); |
+ if (p->alignPriceCount >= kAlignTableSize) |
+ FillAlignPrices(p); |
+ } |
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) |
+ break; |
+ processed = nowPos32 - startPos32; |
+ if (useLimits) |
+ { |
+ if (processed + kNumOpts + 300 >= maxUnpackSize || |
+ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize) |
+ break; |
+ } |
+ else if (processed >= (1 << 15)) |
+ { |
+ p->nowPos64 += nowPos32 - startPos32; |
+ return CheckErrors(p); |
+ } |
+ } |
+ } |
+ p->nowPos64 += nowPos32 - startPos32; |
+ return Flush(p, nowPos32); |
+} |
+ |
+#define kBigHashDicLimit ((UInt32)1 << 24) |
+ |
+static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ UInt32 beforeSize = kNumOpts; |
+ Bool btMode; |
+ if (!RangeEnc_Alloc(&p->rc, alloc)) |
+ return SZ_ERROR_MEM; |
+ btMode = (p->matchFinderBase.btMode != 0); |
+ #ifndef _7ZIP_ST |
+ p->mtMode = (p->multiThread && !p->fastMode && btMode); |
+ #endif |
+ |
+ { |
+ unsigned lclp = p->lc + p->lp; |
+ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp) |
+ { |
+ LzmaEnc_FreeLits(p, alloc); |
+ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb)); |
+ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb)); |
+ if (p->litProbs == 0 || p->saveState.litProbs == 0) |
+ { |
+ LzmaEnc_FreeLits(p, alloc); |
+ return SZ_ERROR_MEM; |
+ } |
+ p->lclp = lclp; |
+ } |
+ } |
+ |
+ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit); |
+ |
+ if (beforeSize + p->dictSize < keepWindowSize) |
+ beforeSize = keepWindowSize - p->dictSize; |
+ |
+ #ifndef _7ZIP_ST |
+ if (p->mtMode) |
+ { |
+ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig)); |
+ p->matchFinderObj = &p->matchFinderMt; |
+ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder); |
+ } |
+ else |
+ #endif |
+ { |
+ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig)) |
+ return SZ_ERROR_MEM; |
+ p->matchFinderObj = &p->matchFinderBase; |
+ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder); |
+ } |
+ return SZ_OK; |
+} |
+ |
+void LzmaEnc_Init(CLzmaEnc *p) |
+{ |
+ UInt32 i; |
+ p->state = 0; |
+ for (i = 0 ; i < LZMA_NUM_REPS; i++) |
+ p->reps[i] = 0; |
+ |
+ RangeEnc_Init(&p->rc); |
+ |
+ |
+ for (i = 0; i < kNumStates; i++) |
+ { |
+ UInt32 j; |
+ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++) |
+ { |
+ p->isMatch[i][j] = kProbInitValue; |
+ p->isRep0Long[i][j] = kProbInitValue; |
+ } |
+ p->isRep[i] = kProbInitValue; |
+ p->isRepG0[i] = kProbInitValue; |
+ p->isRepG1[i] = kProbInitValue; |
+ p->isRepG2[i] = kProbInitValue; |
+ } |
+ |
+ { |
+ UInt32 num = 0x300 << (p->lp + p->lc); |
+ for (i = 0; i < num; i++) |
+ p->litProbs[i] = kProbInitValue; |
+ } |
+ |
+ { |
+ for (i = 0; i < kNumLenToPosStates; i++) |
+ { |
+ CLzmaProb *probs = p->posSlotEncoder[i]; |
+ UInt32 j; |
+ for (j = 0; j < (1 << kNumPosSlotBits); j++) |
+ probs[j] = kProbInitValue; |
+ } |
+ } |
+ { |
+ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++) |
+ p->posEncoders[i] = kProbInitValue; |
+ } |
+ |
+ LenEnc_Init(&p->lenEnc.p); |
+ LenEnc_Init(&p->repLenEnc.p); |
+ |
+ for (i = 0; i < (1 << kNumAlignBits); i++) |
+ p->posAlignEncoder[i] = kProbInitValue; |
+ |
+ p->optimumEndIndex = 0; |
+ p->optimumCurrentIndex = 0; |
+ p->additionalOffset = 0; |
+ |
+ p->pbMask = (1 << p->pb) - 1; |
+ p->lpMask = (1 << p->lp) - 1; |
+} |
+ |
+void LzmaEnc_InitPrices(CLzmaEnc *p) |
+{ |
+ if (!p->fastMode) |
+ { |
+ FillDistancesPrices(p); |
+ FillAlignPrices(p); |
+ } |
+ |
+ p->lenEnc.tableSize = |
+ p->repLenEnc.tableSize = |
+ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN; |
+ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices); |
+ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices); |
+} |
+ |
+static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ UInt32 i; |
+ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++) |
+ if (p->dictSize <= ((UInt32)1 << i)) |
+ break; |
+ p->distTableSize = i * 2; |
+ |
+ p->finished = False; |
+ p->result = SZ_OK; |
+ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig)); |
+ LzmaEnc_Init(p); |
+ LzmaEnc_InitPrices(p); |
+ p->nowPos64 = 0; |
+ return SZ_OK; |
+} |
+ |
+static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, |
+ ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ p->matchFinderBase.stream = inStream; |
+ p->needInit = 1; |
+ p->rc.outStream = outStream; |
+ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig); |
+} |
+ |
+SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, |
+ ISeqInStream *inStream, UInt32 keepWindowSize, |
+ ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ p->matchFinderBase.stream = inStream; |
+ p->needInit = 1; |
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); |
+} |
+ |
+static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen) |
+{ |
+ p->matchFinderBase.directInput = 1; |
+ p->matchFinderBase.bufferBase = (Byte *)src; |
+ p->matchFinderBase.directInputRem = srcLen; |
+} |
+ |
+SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen, |
+ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ LzmaEnc_SetInputBuf(p, src, srcLen); |
+ p->needInit = 1; |
+ |
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); |
+} |
+ |
+void LzmaEnc_Finish(CLzmaEncHandle pp) |
+{ |
+ #ifndef _7ZIP_ST |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ if (p->mtMode) |
+ MatchFinderMt_ReleaseStream(&p->matchFinderMt); |
+ #else |
+ pp = pp; |
+ #endif |
+} |
+ |
+typedef struct |
+{ |
+ ISeqOutStream funcTable; |
+ Byte *data; |
+ SizeT rem; |
+ Bool overflow; |
+} CSeqOutStreamBuf; |
+ |
+static size_t MyWrite(void *pp, const void *data, size_t size) |
+{ |
+ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp; |
+ if (p->rem < size) |
+ { |
+ size = p->rem; |
+ p->overflow = True; |
+ } |
+ memcpy(p->data, data, size); |
+ p->rem -= size; |
+ p->data += size; |
+ return size; |
+} |
+ |
+ |
+UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp) |
+{ |
+ const CLzmaEnc *p = (CLzmaEnc *)pp; |
+ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); |
+} |
+ |
+const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp) |
+{ |
+ const CLzmaEnc *p = (CLzmaEnc *)pp; |
+ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; |
+} |
+ |
+SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit, |
+ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ UInt64 nowPos64; |
+ SRes res; |
+ CSeqOutStreamBuf outStream; |
+ |
+ outStream.funcTable.Write = MyWrite; |
+ outStream.data = dest; |
+ outStream.rem = *destLen; |
+ outStream.overflow = False; |
+ |
+ p->writeEndMark = False; |
+ p->finished = False; |
+ p->result = SZ_OK; |
+ |
+ if (reInit) |
+ LzmaEnc_Init(p); |
+ LzmaEnc_InitPrices(p); |
+ nowPos64 = p->nowPos64; |
+ RangeEnc_Init(&p->rc); |
+ p->rc.outStream = &outStream.funcTable; |
+ |
+ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize); |
+ |
+ *unpackSize = (UInt32)(p->nowPos64 - nowPos64); |
+ *destLen -= outStream.rem; |
+ if (outStream.overflow) |
+ return SZ_ERROR_OUTPUT_EOF; |
+ |
+ return res; |
+} |
+ |
+static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress) |
+{ |
+ SRes res = SZ_OK; |
+ |
+ #ifndef _7ZIP_ST |
+ Byte allocaDummy[0x300]; |
+ int i = 0; |
+ for (i = 0; i < 16; i++) |
+ allocaDummy[i] = (Byte)i; |
+ #endif |
+ |
+ for (;;) |
+ { |
+ res = LzmaEnc_CodeOneBlock(p, False, 0, 0); |
+ if (res != SZ_OK || p->finished != 0) |
+ break; |
+ if (progress != 0) |
+ { |
+ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc)); |
+ if (res != SZ_OK) |
+ { |
+ res = SZ_ERROR_PROGRESS; |
+ break; |
+ } |
+ } |
+ } |
+ LzmaEnc_Finish(p); |
+ return res; |
+} |
+ |
+SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress, |
+ ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig)); |
+ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress); |
+} |
+ |
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ int i; |
+ UInt32 dictSize = p->dictSize; |
+ if (*size < LZMA_PROPS_SIZE) |
+ return SZ_ERROR_PARAM; |
+ *size = LZMA_PROPS_SIZE; |
+ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc); |
+ |
+ for (i = 11; i <= 30; i++) |
+ { |
+ if (dictSize <= ((UInt32)2 << i)) |
+ { |
+ dictSize = (2 << i); |
+ break; |
+ } |
+ if (dictSize <= ((UInt32)3 << i)) |
+ { |
+ dictSize = (3 << i); |
+ break; |
+ } |
+ } |
+ |
+ for (i = 0; i < 4; i++) |
+ props[1 + i] = (Byte)(dictSize >> (8 * i)); |
+ return SZ_OK; |
+} |
+ |
+SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, |
+ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ SRes res; |
+ CLzmaEnc *p = (CLzmaEnc *)pp; |
+ |
+ CSeqOutStreamBuf outStream; |
+ |
+ LzmaEnc_SetInputBuf(p, src, srcLen); |
+ |
+ outStream.funcTable.Write = MyWrite; |
+ outStream.data = dest; |
+ outStream.rem = *destLen; |
+ outStream.overflow = False; |
+ |
+ p->writeEndMark = writeEndMark; |
+ |
+ p->rc.outStream = &outStream.funcTable; |
+ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig); |
+ if (res == SZ_OK) |
+ res = LzmaEnc_Encode2(p, progress); |
+ |
+ *destLen -= outStream.rem; |
+ if (outStream.overflow) |
+ return SZ_ERROR_OUTPUT_EOF; |
+ return res; |
+} |
+ |
+SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, |
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, |
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig) |
+{ |
+ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc); |
+ SRes res; |
+ if (p == 0) |
+ return SZ_ERROR_MEM; |
+ |
+ res = LzmaEnc_SetProps(p, props); |
+ if (res == SZ_OK) |
+ { |
+ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize); |
+ if (res == SZ_OK) |
+ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen, |
+ writeEndMark, progress, alloc, allocBig); |
+ } |
+ |
+ LzmaEnc_Destroy(p, alloc, allocBig); |
+ return res; |
+} |
--- /dev/null |
+++ b/lib/lzma/Makefile |
@@ -0,0 +1,7 @@ |
+lzma_compress-objs := LzFind.o LzmaEnc.o |
+lzma_decompress-objs := LzmaDec.o |
+ |
+obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o |
+obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o |
+ |
+EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h |
/branches/18.06.1/target/linux/generic/pending-4.9/532-jffs2_eofdetect.patch |
---|
@@ -0,0 +1,65 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: fs: jffs2: EOF marker |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
fs/jffs2/build.c | 10 ++++++++++ |
fs/jffs2/scan.c | 21 +++++++++++++++++++-- |
2 files changed, 29 insertions(+), 2 deletions(-) |
--- a/fs/jffs2/build.c |
+++ b/fs/jffs2/build.c |
@@ -117,6 +117,16 @@ static int jffs2_build_filesystem(struct |
dbg_fsbuild("scanned flash completely\n"); |
jffs2_dbg_dump_block_lists_nolock(c); |
+ if (c->flags & (1 << 7)) { |
+ printk("%s(): unlocking the mtd device... ", __func__); |
+ mtd_unlock(c->mtd, 0, c->mtd->size); |
+ printk("done.\n"); |
+ |
+ printk("%s(): erasing all blocks after the end marker... ", __func__); |
+ jffs2_erase_pending_blocks(c, -1); |
+ printk("done.\n"); |
+ } |
+ |
dbg_fsbuild("pass 1 starting\n"); |
c->flags |= JFFS2_SB_FLAG_BUILDING; |
/* Now scan the directory tree, increasing nlink according to every dirent found. */ |
--- a/fs/jffs2/scan.c |
+++ b/fs/jffs2/scan.c |
@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_in |
/* reset summary info for next eraseblock scan */ |
jffs2_sum_reset_collected(s); |
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), |
- buf_size, s); |
+ if (c->flags & (1 << 7)) { |
+ if (mtd_block_isbad(c->mtd, jeb->offset)) |
+ ret = BLK_STATE_BADBLOCK; |
+ else |
+ ret = BLK_STATE_ALLFF; |
+ } else |
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), |
+ buf_size, s); |
if (ret < 0) |
goto out; |
@@ -561,6 +567,17 @@ full_scan: |
return err; |
} |
+ if ((buf[0] == 0xde) && |
+ (buf[1] == 0xad) && |
+ (buf[2] == 0xc0) && |
+ (buf[3] == 0xde)) { |
+ /* end of filesystem. erase everything after this point */ |
+ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset); |
+ c->flags |= (1 << 7); |
+ |
+ return BLK_STATE_ALLFF; |
+ } |
+ |
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */ |
ofs = 0; |
max_ofs = EMPTY_SCAN_SIZE(c->sector_size); |
/branches/18.06.1/target/linux/generic/pending-4.9/551-ubifs-fix-default-compression-selection.patch |
---|
@@ -0,0 +1,37 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: fs: ubifs: fix default compression selection in ubifs |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
fs/ubifs/sb.c | 13 ++++++++++++- |
1 file changed, 12 insertions(+), 1 deletion(-) |
--- a/fs/ubifs/sb.c |
+++ b/fs/ubifs/sb.c |
@@ -63,6 +63,17 @@ |
/* Default time granularity in nanoseconds */ |
#define DEFAULT_TIME_GRAN 1000000000 |
+static int get_default_compressor(void) |
+{ |
+ if (ubifs_compr_present(UBIFS_COMPR_LZO)) |
+ return UBIFS_COMPR_LZO; |
+ |
+ if (ubifs_compr_present(UBIFS_COMPR_ZLIB)) |
+ return UBIFS_COMPR_ZLIB; |
+ |
+ return UBIFS_COMPR_NONE; |
+} |
+ |
/** |
* create_default_filesystem - format empty UBI volume. |
* @c: UBIFS file-system description object |
@@ -183,7 +194,7 @@ static int create_default_filesystem(str |
if (c->mount_opts.override_compr) |
sup->default_compr = cpu_to_le16(c->mount_opts.compr_type); |
else |
- sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); |
+ sup->default_compr = cpu_to_le16(get_default_compressor()); |
generate_random_uuid(sup->uuid); |
/branches/18.06.1/target/linux/generic/pending-4.9/600-netfilter_conntrack_flush.patch |
---|
@@ -0,0 +1,95 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: netfilter: add support for flushing conntrack via /proc |
lede-commit 8193bbe59a74d34d6a26d4a8cb857b1952905314 |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
net/netfilter/nf_conntrack_standalone.c | 59 ++++++++++++++++++++++++++++++++- |
1 file changed, 58 insertions(+), 1 deletion(-) |
--- a/net/netfilter/nf_conntrack_standalone.c |
+++ b/net/netfilter/nf_conntrack_standalone.c |
@@ -17,6 +17,7 @@ |
#include <linux/percpu.h> |
#include <linux/netdevice.h> |
#include <linux/security.h> |
+#include <linux/inet.h> |
#include <net/net_namespace.h> |
#ifdef CONFIG_SYSCTL |
#include <linux/sysctl.h> |
@@ -298,10 +299,66 @@ static int ct_open(struct inode *inode, |
sizeof(struct ct_iter_state)); |
} |
+struct kill_request { |
+ u16 family; |
+ union nf_inet_addr addr; |
+}; |
+ |
+static int kill_matching(struct nf_conn *i, void *data) |
+{ |
+ struct kill_request *kr = data; |
+ struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
+ struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple; |
+ |
+ if (!kr->family) |
+ return 1; |
+ |
+ if (t1->src.l3num != kr->family) |
+ return 0; |
+ |
+ return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) || |
+ nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) || |
+ nf_inet_addr_cmp(&kr->addr, &t2->src.u3) || |
+ nf_inet_addr_cmp(&kr->addr, &t2->dst.u3)); |
+} |
+ |
+static ssize_t ct_file_write(struct file *file, const char __user *buf, |
+ size_t count, loff_t *ppos) |
+{ |
+ struct seq_file *seq = file->private_data; |
+ struct net *net = seq_file_net(seq); |
+ struct kill_request kr = { }; |
+ char req[INET6_ADDRSTRLEN] = { }; |
+ |
+ if (count == 0) |
+ return 0; |
+ |
+ if (count >= INET6_ADDRSTRLEN) |
+ count = INET6_ADDRSTRLEN - 1; |
+ |
+ if (copy_from_user(req, buf, count)) |
+ return -EFAULT; |
+ |
+ if (strnchr(req, count, ':')) { |
+ kr.family = AF_INET6; |
+ if (!in6_pton(req, count, (void *)&kr.addr, '\n', NULL)) |
+ return -EINVAL; |
+ } else if (strnchr(req, count, '.')) { |
+ kr.family = AF_INET; |
+ if (!in4_pton(req, count, (void *)&kr.addr, '\n', NULL)) |
+ return -EINVAL; |
+ } |
+ |
+ nf_ct_iterate_cleanup(net, kill_matching, &kr, 0, 0); |
+ |
+ return count; |
+} |
+ |
static const struct file_operations ct_file_ops = { |
.owner = THIS_MODULE, |
.open = ct_open, |
.read = seq_read, |
+ .write = ct_file_write, |
.llseek = seq_lseek, |
.release = seq_release_net, |
}; |
@@ -405,7 +462,7 @@ static int nf_conntrack_standalone_init_ |
kuid_t root_uid; |
kgid_t root_gid; |
- pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops); |
+ pde = proc_create("nf_conntrack", 0660, net->proc_net, &ct_file_ops); |
if (!pde) |
goto out_nf_conntrack; |
/branches/18.06.1/target/linux/generic/pending-4.9/610-netfilter_match_bypass_default_checks.patch |
---|
@@ -0,0 +1,110 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: add a new version of my netfilter speedup patches for linux 2.6.39 and 3.0 |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
include/uapi/linux/netfilter_ipv4/ip_tables.h | 1 + |
net/ipv4/netfilter/ip_tables.c | 37 +++++++++++++++++++++++++++ |
2 files changed, 38 insertions(+) |
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h |
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h |
@@ -88,6 +88,7 @@ struct ipt_ip { |
#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ |
#define IPT_F_GOTO 0x02 /* Set if jump is a goto */ |
#define IPT_F_MASK 0x03 /* All possible flag bits mask. */ |
+#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */ |
/* Values for "inv" field in struct ipt_ip. */ |
#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ |
--- a/net/ipv4/netfilter/ip_tables.c |
+++ b/net/ipv4/netfilter/ip_tables.c |
@@ -58,6 +58,9 @@ ip_packet_match(const struct iphdr *ip, |
{ |
unsigned long ret; |
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH) |
+ return true; |
+ |
if (NF_INVF(ipinfo, IPT_INV_SRCIP, |
(ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) || |
NF_INVF(ipinfo, IPT_INV_DSTIP, |
@@ -88,6 +91,29 @@ ip_packet_match(const struct iphdr *ip, |
return true; |
} |
+static void |
+ip_checkdefault(struct ipt_ip *ip) |
+{ |
+ static const char iface_mask[IFNAMSIZ] = {}; |
+ |
+ if (ip->invflags || ip->flags & IPT_F_FRAG) |
+ return; |
+ |
+ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0) |
+ return; |
+ |
+ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0) |
+ return; |
+ |
+ if (ip->smsk.s_addr || ip->dmsk.s_addr) |
+ return; |
+ |
+ if (ip->proto) |
+ return; |
+ |
+ ip->flags |= IPT_F_NO_DEF_MATCH; |
+} |
+ |
static bool |
ip_checkentry(const struct ipt_ip *ip) |
{ |
@@ -550,6 +576,8 @@ find_check_entry(struct ipt_entry *e, st |
struct xt_mtchk_param mtpar; |
struct xt_entry_match *ematch; |
+ ip_checkdefault(&e->ip); |
+ |
if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) |
return -ENOMEM; |
@@ -830,6 +858,7 @@ copy_entries_to_user(unsigned int total_ |
const struct xt_table_info *private = table->private; |
int ret = 0; |
const void *loc_cpu_entry; |
+ u8 flags; |
counters = alloc_counters(table); |
if (IS_ERR(counters)) |
@@ -857,6 +886,14 @@ copy_entries_to_user(unsigned int total_ |
goto free_counters; |
} |
+ flags = e->ip.flags & IPT_F_MASK; |
+ if (copy_to_user(userptr + off |
+ + offsetof(struct ipt_entry, ip.flags), |
+ &flags, sizeof(flags)) != 0) { |
+ ret = -EFAULT; |
+ goto free_counters; |
+ } |
+ |
for (i = sizeof(struct ipt_entry); |
i < e->target_offset; |
i += m->u.match_size) { |
@@ -1246,12 +1283,15 @@ compat_copy_entry_to_user(struct ipt_ent |
compat_uint_t origsize; |
const struct xt_entry_match *ematch; |
int ret = 0; |
+ u8 flags = e->ip.flags & IPT_F_MASK; |
origsize = *size; |
ce = (struct compat_ipt_entry __user *)*dstptr; |
if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || |
copy_to_user(&ce->counters, &counters[i], |
- sizeof(counters[i])) != 0) |
+ sizeof(counters[i])) != 0 || |
+ copy_to_user(&ce->ip.flags, &flags, |
+ sizeof(flags)) != 0) |
return -EFAULT; |
*dstptr += sizeof(struct compat_ipt_entry); |
/branches/18.06.1/target/linux/generic/pending-4.9/611-netfilter_match_bypass_default_table.patch |
---|
@@ -0,0 +1,119 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: netfilter: match bypass default table |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
net/ipv4/netfilter/ip_tables.c | 79 +++++++++++++++++++++++++++++++----------- |
1 file changed, 58 insertions(+), 21 deletions(-) |
--- a/net/ipv4/netfilter/ip_tables.c |
+++ b/net/ipv4/netfilter/ip_tables.c |
@@ -254,6 +254,33 @@ struct ipt_entry *ipt_next_entry(const s |
return (void *)entry + entry->next_offset; |
} |
+static bool |
+ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict) |
+{ |
+ struct xt_entry_target *t; |
+ struct xt_standard_target *st; |
+ |
+ if (e->target_offset != sizeof(struct ipt_entry)) |
+ return false; |
+ |
+ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH)) |
+ return false; |
+ |
+ t = ipt_get_target(e); |
+ if (t->u.kernel.target->target) |
+ return false; |
+ |
+ st = (struct xt_standard_target *) t; |
+ if (st->verdict == XT_RETURN) |
+ return false; |
+ |
+ if (st->verdict >= 0) |
+ return false; |
+ |
+ *verdict = (unsigned)(-st->verdict) - 1; |
+ return true; |
+} |
+ |
/* Returns one of the generic firewall policies, like NF_ACCEPT. */ |
unsigned int |
ipt_do_table(struct sk_buff *skb, |
@@ -274,28 +301,8 @@ ipt_do_table(struct sk_buff *skb, |
unsigned int addend; |
/* Initialization */ |
- stackidx = 0; |
- ip = ip_hdr(skb); |
- indev = state->in ? state->in->name : nulldevname; |
- outdev = state->out ? state->out->name : nulldevname; |
- /* We handle fragments by dealing with the first fragment as |
- * if it was a normal packet. All other fragments are treated |
- * normally, except that they will NEVER match rules that ask |
- * things we don't know, ie. tcp syn flag or ports). If the |
- * rule is also a fragment-specific rule, non-fragments won't |
- * match it. */ |
- acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; |
- acpar.thoff = ip_hdrlen(skb); |
- acpar.hotdrop = false; |
- acpar.net = state->net; |
- acpar.in = state->in; |
- acpar.out = state->out; |
- acpar.family = NFPROTO_IPV4; |
- acpar.hooknum = hook; |
- |
IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
local_bh_disable(); |
- addend = xt_write_recseq_begin(); |
private = table->private; |
cpu = smp_processor_id(); |
/* |
@@ -304,6 +311,23 @@ ipt_do_table(struct sk_buff *skb, |
*/ |
smp_read_barrier_depends(); |
table_base = private->entries; |
+ |
+ e = get_entry(table_base, private->hook_entry[hook]); |
+ if (ipt_handle_default_rule(e, &verdict)) { |
+ struct xt_counters *counter; |
+ |
+ counter = xt_get_this_cpu_counter(&e->counters); |
+ ADD_COUNTER(*counter, skb->len, 1); |
+ local_bh_enable(); |
+ return verdict; |
+ } |
+ |
+ stackidx = 0; |
+ ip = ip_hdr(skb); |
+ indev = state->in ? state->in->name : nulldevname; |
+ outdev = state->out ? state->out->name : nulldevname; |
+ |
+ addend = xt_write_recseq_begin(); |
jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; |
/* Switch to alternate jumpstack if we're being invoked via TEE. |
@@ -316,7 +340,20 @@ ipt_do_table(struct sk_buff *skb, |
if (static_key_false(&xt_tee_enabled)) |
jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); |
- e = get_entry(table_base, private->hook_entry[hook]); |
+ /* We handle fragments by dealing with the first fragment as |
+ * if it was a normal packet. All other fragments are treated |
+ * normally, except that they will NEVER match rules that ask |
+ * things we don't know, ie. tcp syn flag or ports). If the |
+ * rule is also a fragment-specific rule, non-fragments won't |
+ * match it. */ |
+ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; |
+ acpar.thoff = ip_hdrlen(skb); |
+ acpar.hotdrop = false; |
+ acpar.net = state->net; |
+ acpar.in = state->in; |
+ acpar.out = state->out; |
+ acpar.family = NFPROTO_IPV4; |
+ acpar.hooknum = hook; |
do { |
const struct xt_entry_target *t; |
/branches/18.06.1/target/linux/generic/pending-4.9/612-netfilter_match_reduce_memory_access.patch |
---|
@@ -0,0 +1,22 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: netfilter: reduce match memory access |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
net/ipv4/netfilter/ip_tables.c | 4 ++-- |
1 file changed, 2 insertions(+), 2 deletions(-) |
--- a/net/ipv4/netfilter/ip_tables.c |
+++ b/net/ipv4/netfilter/ip_tables.c |
@@ -61,9 +61,9 @@ ip_packet_match(const struct iphdr *ip, |
if (ipinfo->flags & IPT_F_NO_DEF_MATCH) |
return true; |
- if (NF_INVF(ipinfo, IPT_INV_SRCIP, |
+ if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr && |
(ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) || |
- NF_INVF(ipinfo, IPT_INV_DSTIP, |
+ NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr && |
(ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr)) |
return false; |
/branches/18.06.1/target/linux/generic/pending-4.9/613-netfilter_optional_tcp_window_check.patch |
---|
@@ -0,0 +1,44 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: netfilter: optional tcp window check |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
net/netfilter/nf_conntrack_proto_tcp.c | 13 +++++++++++++ |
1 file changed, 13 insertions(+) |
--- a/net/netfilter/nf_conntrack_proto_tcp.c |
+++ b/net/netfilter/nf_conntrack_proto_tcp.c |
@@ -33,6 +33,9 @@ |
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> |
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> |
+/* Do not check the TCP window for incoming packets */ |
+static int nf_ct_tcp_no_window_check __read_mostly = 1; |
+ |
/* "Be conservative in what you do, |
be liberal in what you accept from others." |
If it's non-zero, we mark only out of window RST segments as INVALID. */ |
@@ -513,6 +516,9 @@ static bool tcp_in_window(const struct n |
s32 receiver_offset; |
bool res, in_recv_win; |
+ if (nf_ct_tcp_no_window_check) |
+ return true; |
+ |
/* |
* Get the required data from the packet. |
*/ |
@@ -1479,6 +1485,13 @@ static struct ctl_table tcp_sysctl_table |
.mode = 0644, |
.proc_handler = proc_dointvec, |
}, |
+ { |
+ .procname = "nf_conntrack_tcp_no_window_check", |
+ .data = &nf_ct_tcp_no_window_check, |
+ .maxlen = sizeof(unsigned int), |
+ .mode = 0644, |
+ .proc_handler = proc_dointvec, |
+ }, |
{ } |
}; |
#endif /* CONFIG_SYSCTL */ |
/branches/18.06.1/target/linux/generic/pending-4.9/616-net_optimize_xfrm_calls.patch |
---|
@@ -0,0 +1,20 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: add a small xfrm related performance optimization |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
net/netfilter/nf_nat_core.c | 3 +++ |
1 file changed, 3 insertions(+) |
--- a/net/netfilter/nf_nat_core.c |
+++ b/net/netfilter/nf_nat_core.c |
@@ -93,6 +93,9 @@ int nf_xfrm_me_harder(struct net *net, s |
struct dst_entry *dst; |
int err; |
+ if (skb->dev && !dev_net(skb->dev)->xfrm.policy_count[XFRM_POLICY_OUT]) |
+ return 0; |
+ |
err = xfrm_decode_session(skb, &fl, family); |
if (err < 0) |
return err; |
/branches/18.06.1/target/linux/generic/pending-4.9/620-net_sched-codel-do-not-defer-queue-length-update.patch |
---|
@@ -0,0 +1,86 @@ |
From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> |
Date: Mon, 21 Aug 2017 11:14:14 +0300 |
Subject: [PATCH] net_sched/codel: do not defer queue length update |
When codel wants to drop the last packet in ->dequeue() it cannot call |
qdisc_tree_reduce_backlog() right away - it would notify the parent qdisc |
about a zero qlen and HTB/HFSC would deactivate the class. The same class |
would then be deactivated a second time by the caller of ->dequeue(). |
Currently codel and fq_codel defer the update. This triggers a warning in |
HFSC when its qlen != 0 but there are no active classes. |
This patch updates the parent queue length immediately: it just temporarily |
increases qlen around qdisc_tree_reduce_backlog() to prevent the first class |
deactivation if we have an skb to return. |
This might expose another problem in HFSC - now the peek operation could |
fail and deactivate the parent class. |
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> |
Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581 |
--- |
--- a/net/sched/sch_codel.c |
+++ b/net/sched/sch_codel.c |
@@ -95,11 +95,17 @@ static struct sk_buff *codel_qdisc_deque |
&q->stats, qdisc_pkt_len, codel_get_enqueue_time, |
drop_func, dequeue_func); |
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, |
- * or HTB crashes. Defer it for next round. |
+ /* If our qlen is 0, qdisc_tree_reduce_backlog() will deactivate the |
+ * parent class; dequeue in the parent qdisc will do the same if we |
+ * return an skb. Temporarily increment qlen if we have an skb. |
*/ |
- if (q->stats.drop_count && sch->q.qlen) { |
- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len); |
+ if (q->stats.drop_count) { |
+ if (skb) |
+ sch->q.qlen++; |
+ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, |
+ q->stats.drop_len); |
+ if (skb) |
+ sch->q.qlen--; |
q->stats.drop_count = 0; |
q->stats.drop_len = 0; |
} |
--- a/net/sched/sch_fq_codel.c |
+++ b/net/sched/sch_fq_codel.c |
@@ -318,6 +318,21 @@ begin: |
flow->dropped += q->cstats.drop_count - prev_drop_count; |
flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; |
+ /* If our qlen is 0, qdisc_tree_reduce_backlog() will deactivate the |
+ * parent class; dequeue in the parent qdisc will do the same if we |
+ * return an skb. Temporarily increment qlen if we have an skb. |
+ */ |
+ if (q->cstats.drop_count) { |
+ if (skb) |
+ sch->q.qlen++; |
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, |
+ q->cstats.drop_len); |
+ if (skb) |
+ sch->q.qlen--; |
+ q->cstats.drop_count = 0; |
+ q->cstats.drop_len = 0; |
+ } |
+ |
if (!skb) { |
/* force a pass through old_flows to prevent starvation */ |
if ((head == &q->new_flows) && !list_empty(&q->old_flows)) |
@@ -328,15 +343,6 @@ begin: |
} |
qdisc_bstats_update(sch, skb); |
flow->deficit -= qdisc_pkt_len(skb); |
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, |
- * or HTB crashes. Defer it for next round. |
- */ |
- if (q->cstats.drop_count && sch->q.qlen) { |
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, |
- q->cstats.drop_len); |
- q->cstats.drop_count = 0; |
- q->cstats.drop_len = 0; |
- } |
return skb; |
} |
/branches/18.06.1/target/linux/generic/pending-4.9/630-packet_socket_type.patch |
---|
@@ -0,0 +1,138 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: net: add an optimization for dealing with raw sockets |
lede-commit: 4898039703d7315f0f3431c860123338ec3be0f6 |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
include/uapi/linux/if_packet.h | 3 +++ |
net/packet/af_packet.c | 34 +++++++++++++++++++++++++++------- |
net/packet/internal.h | 1 + |
3 files changed, 31 insertions(+), 7 deletions(-) |
--- a/include/uapi/linux/if_packet.h |
+++ b/include/uapi/linux/if_packet.h |
@@ -31,6 +31,8 @@ struct sockaddr_ll { |
#define PACKET_KERNEL 7 /* To kernel space */ |
/* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */ |
#define PACKET_FASTROUTE 6 /* Fastrouted frame */ |
+#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */ |
+ |
/* Packet socket options */ |
@@ -56,6 +58,7 @@ struct sockaddr_ll { |
#define PACKET_QDISC_BYPASS 20 |
#define PACKET_ROLLOVER_STATS 21 |
#define PACKET_FANOUT_DATA 22 |
+#define PACKET_RECV_TYPE 23 |
#define PACKET_FANOUT_HASH 0 |
#define PACKET_FANOUT_LB 1 |
--- a/net/packet/af_packet.c |
+++ b/net/packet/af_packet.c |
@@ -1780,6 +1780,7 @@ static int packet_rcv_spkt(struct sk_buf |
{ |
struct sock *sk; |
struct sockaddr_pkt *spkt; |
+ struct packet_sock *po; |
/* |
* When we registered the protocol we saved the socket in the data |
@@ -1787,6 +1788,7 @@ static int packet_rcv_spkt(struct sk_buf |
*/ |
sk = pt->af_packet_priv; |
+ po = pkt_sk(sk); |
/* |
* Yank back the headers [hope the device set this |
@@ -1799,7 +1801,7 @@ static int packet_rcv_spkt(struct sk_buf |
* so that this procedure is noop. |
*/ |
- if (skb->pkt_type == PACKET_LOOPBACK) |
+ if (!(po->pkt_type & (1 << skb->pkt_type))) |
goto out; |
if (!net_eq(dev_net(dev), sock_net(sk))) |
@@ -2037,12 +2039,12 @@ static int packet_rcv(struct sk_buff *sk |
unsigned int snaplen, res; |
bool is_drop_n_account = false; |
- if (skb->pkt_type == PACKET_LOOPBACK) |
- goto drop; |
- |
sk = pt->af_packet_priv; |
po = pkt_sk(sk); |
+ if (!(po->pkt_type & (1 << skb->pkt_type))) |
+ goto drop; |
+ |
if (!net_eq(dev_net(dev), sock_net(sk))) |
goto drop; |
@@ -2168,12 +2170,12 @@ static int tpacket_rcv(struct sk_buff *s |
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); |
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); |
- if (skb->pkt_type == PACKET_LOOPBACK) |
- goto drop; |
- |
sk = pt->af_packet_priv; |
po = pkt_sk(sk); |
+ if (!(po->pkt_type & (1 << skb->pkt_type))) |
+ goto drop; |
+ |
if (!net_eq(dev_net(dev), sock_net(sk))) |
goto drop; |
@@ -3266,6 +3268,7 @@ static int packet_create(struct net *net |
mutex_init(&po->pg_vec_lock); |
po->rollover = NULL; |
po->prot_hook.func = packet_rcv; |
+ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK); |
if (sock->type == SOCK_PACKET) |
po->prot_hook.func = packet_rcv_spkt; |
@@ -3879,6 +3882,16 @@ packet_setsockopt(struct socket *sock, i |
po->xmit = val ? packet_direct_xmit : dev_queue_xmit; |
return 0; |
} |
+ case PACKET_RECV_TYPE: |
+ { |
+ unsigned int val; |
+ if (optlen != sizeof(val)) |
+ return -EINVAL; |
+ if (copy_from_user(&val, optval, sizeof(val))) |
+ return -EFAULT; |
+ po->pkt_type = val & ~BIT(PACKET_LOOPBACK); |
+ return 0; |
+ } |
default: |
return -ENOPROTOOPT; |
} |
@@ -3931,6 +3944,13 @@ static int packet_getsockopt(struct sock |
case PACKET_VNET_HDR: |
val = po->has_vnet_hdr; |
break; |
+ case PACKET_RECV_TYPE: |
+ if (len > sizeof(unsigned int)) |
+ len = sizeof(unsigned int); |
+ val = po->pkt_type; |
+ |
+ data = &val; |
+ break; |
case PACKET_VERSION: |
val = po->tp_version; |
break; |
--- a/net/packet/internal.h |
+++ b/net/packet/internal.h |
@@ -128,6 +128,7 @@ struct packet_sock { |
struct net_device __rcu *cached_dev; |
int (*xmit)(struct sk_buff *skb); |
struct packet_type prot_hook ____cacheline_aligned_in_smp; |
+ unsigned int pkt_type; |
}; |
static struct packet_sock *pkt_sk(struct sock *sk) |
/branches/18.06.1/target/linux/generic/pending-4.9/655-increase_skb_pad.patch |
---|
@@ -0,0 +1,20 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: kernel: add a few patches for avoiding unnecessary skb reallocations - significantly improves ethernet<->wireless performance |
lede-commit: 6f89cffc9add6939d44a6b54cf9a5e77849aa7fd |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
include/linux/skbuff.h | 2 +- |
1 file changed, 1 insertion(+), 1 deletion(-) |
--- a/include/linux/skbuff.h |
+++ b/include/linux/skbuff.h |
@@ -2303,7 +2303,7 @@ static inline int pskb_network_may_pull( |
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) |
*/ |
#ifndef NET_SKB_PAD |
-#define NET_SKB_PAD max(32, L1_CACHE_BYTES) |
+#define NET_SKB_PAD max(64, L1_CACHE_BYTES) |
#endif |
int ___pskb_trim(struct sk_buff *skb, unsigned int len); |
/branches/18.06.1/target/linux/generic/pending-4.9/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch |
---|
@@ -0,0 +1,500 @@ |
From: Steven Barth <steven@midlink.org> |
Subject: Add support for MAP-E FMRs (mesh mode) |
MAP-E FMRs (draft-ietf-softwire-map-10) are rules for IPv4 communication |
between MAP CEs (mesh mode) without the need to forward such data to a |
border relay. This is similar to how 6rd works but for IPv4 over IPv6. |
Signed-off-by: Steven Barth <cyrus@openwrt.org> |
--- |
include/net/ip6_tunnel.h | 13 ++ |
include/uapi/linux/if_tunnel.h | 13 ++ |
net/ipv6/ip6_tunnel.c | 276 +++++++++++++++++++++++++++++++++++++++-- |
3 files changed, 291 insertions(+), 11 deletions(-) |
--- a/include/net/ip6_tunnel.h |
+++ b/include/net/ip6_tunnel.h |
@@ -17,6 +17,18 @@ |
/* determine capability on a per-packet basis */ |
#define IP6_TNL_F_CAP_PER_PACKET 0x40000 |
+/* IPv6 tunnel FMR */ |
+struct __ip6_tnl_fmr { |
+ struct __ip6_tnl_fmr *next; /* next fmr in list */ |
+ struct in6_addr ip6_prefix; |
+ struct in_addr ip4_prefix; |
+ |
+ __u8 ip6_prefix_len; |
+ __u8 ip4_prefix_len; |
+ __u8 ea_len; |
+ __u8 offset; |
+}; |
+ |
struct __ip6_tnl_parm { |
char name[IFNAMSIZ]; /* name of tunnel device */ |
int link; /* ifindex of underlying L2 interface */ |
@@ -28,6 +40,7 @@ struct __ip6_tnl_parm { |
__u32 flags; /* tunnel flags */ |
struct in6_addr laddr; /* local tunnel end-point address */ |
struct in6_addr raddr; /* remote tunnel end-point address */ |
+ struct __ip6_tnl_fmr *fmrs; /* FMRs */ |
__be16 i_flags; |
__be16 o_flags; |
--- a/include/uapi/linux/if_tunnel.h |
+++ b/include/uapi/linux/if_tunnel.h |
@@ -75,10 +75,23 @@ enum { |
IFLA_IPTUN_ENCAP_SPORT, |
IFLA_IPTUN_ENCAP_DPORT, |
IFLA_IPTUN_COLLECT_METADATA, |
+ IFLA_IPTUN_FMRS, |
__IFLA_IPTUN_MAX, |
}; |
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) |
+enum { |
+ IFLA_IPTUN_FMR_UNSPEC, |
+ IFLA_IPTUN_FMR_IP6_PREFIX, |
+ IFLA_IPTUN_FMR_IP4_PREFIX, |
+ IFLA_IPTUN_FMR_IP6_PREFIX_LEN, |
+ IFLA_IPTUN_FMR_IP4_PREFIX_LEN, |
+ IFLA_IPTUN_FMR_EA_LEN, |
+ IFLA_IPTUN_FMR_OFFSET, |
+ __IFLA_IPTUN_FMR_MAX, |
+}; |
+#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1) |
+ |
enum tunnel_encap_types { |
TUNNEL_ENCAP_NONE, |
TUNNEL_ENCAP_FOU, |
--- a/net/ipv6/ip6_tunnel.c |
+++ b/net/ipv6/ip6_tunnel.c |
@@ -16,6 +16,8 @@ |
* as published by the Free Software Foundation; either version |
* 2 of the License, or (at your option) any later version. |
* |
+ * Changes: |
+ * Steven Barth <cyrus@openwrt.org>: MAP-E FMR support |
*/ |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
@@ -72,9 +74,9 @@ static bool log_ecn_error = true; |
module_param(log_ecn_error, bool, 0644); |
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); |
-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) |
+static u32 HASH(const struct in6_addr *addr) |
{ |
- u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); |
+ u32 hash = ipv6_addr_hash(addr); |
return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT); |
} |
@@ -141,20 +143,29 @@ static struct net_device_stats *ip6_get_ |
static struct ip6_tnl * |
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local) |
{ |
- unsigned int hash = HASH(remote, local); |
+ unsigned int hash = HASH(local); |
struct ip6_tnl *t; |
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
struct in6_addr any; |
+ struct __ip6_tnl_fmr *fmr; |
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { |
- if (ipv6_addr_equal(local, &t->parms.laddr) && |
- ipv6_addr_equal(remote, &t->parms.raddr) && |
- (t->dev->flags & IFF_UP)) |
+ if (!ipv6_addr_equal(local, &t->parms.laddr) || |
+ !(t->dev->flags & IFF_UP)) |
+ continue; |
+ |
+ if (ipv6_addr_equal(remote, &t->parms.raddr)) |
return t; |
+ |
+ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) { |
+ if (ipv6_prefix_equal(remote, &fmr->ip6_prefix, |
+ fmr->ip6_prefix_len)) |
+ return t; |
+ } |
} |
memset(&any, 0, sizeof(any)); |
- hash = HASH(&any, local); |
+ hash = HASH(local); |
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { |
if (ipv6_addr_equal(local, &t->parms.laddr) && |
ipv6_addr_any(&t->parms.raddr) && |
@@ -162,7 +173,7 @@ ip6_tnl_lookup(struct net *net, const st |
return t; |
} |
- hash = HASH(remote, &any); |
+ hash = HASH(&any); |
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { |
if (ipv6_addr_equal(remote, &t->parms.raddr) && |
ipv6_addr_any(&t->parms.laddr) && |
@@ -202,7 +213,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, |
if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { |
prio = 1; |
- h = HASH(remote, local); |
+ h = HASH(local); |
} |
return &ip6n->tnls[prio][h]; |
} |
@@ -384,6 +395,12 @@ ip6_tnl_dev_uninit(struct net_device *de |
struct net *net = t->net; |
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
+ while (t->parms.fmrs) { |
+ struct __ip6_tnl_fmr *next = t->parms.fmrs->next; |
+ kfree(t->parms.fmrs); |
+ t->parms.fmrs = next; |
+ } |
+ |
if (dev == ip6n->fb_tnl_dev) |
RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); |
else |
@@ -780,6 +797,107 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t, |
} |
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl); |
+/** |
+ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR |
+ * @dest: destination IPv6 address buffer |
+ * @skb: received socket buffer |
+ * @fmr: MAP FMR |
+ * @xmit: Calculate for xmit or rcv |
+ **/ |
+static void ip4ip6_fmr_calc(struct in6_addr *dest, |
+ const struct iphdr *iph, const uint8_t *end, |
+ const struct __ip6_tnl_fmr *fmr, bool xmit) |
+{ |
+ int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len); |
+ u8 *portp = NULL; |
+ bool use_dest_addr; |
+ const struct iphdr *dsth = iph; |
+ |
+ if ((u8*)dsth >= end) |
+ return; |
+ |
+ /* find significant IP header */ |
+ if (iph->protocol == IPPROTO_ICMP) { |
+ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4); |
+ if (ih && ((u8*)&ih[1]) <= end && ( |
+ ih->type == ICMP_DEST_UNREACH || |
+ ih->type == ICMP_SOURCE_QUENCH || |
+ ih->type == ICMP_TIME_EXCEEDED || |
+ ih->type == ICMP_PARAMETERPROB || |
+ ih->type == ICMP_REDIRECT)) |
+ dsth = (const struct iphdr*)&ih[1]; |
+ } |
+ |
+ /* in xmit-path use dest port by default and source port only if |
+ this is an ICMP reply to something else; vice versa in rcv-path */ |
+ use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph); |
+ |
+ /* get dst port */ |
+ if (((u8*)&dsth[1]) <= end && ( |
+ dsth->protocol == IPPROTO_UDP || |
+ dsth->protocol == IPPROTO_TCP || |
+ dsth->protocol == IPPROTO_SCTP || |
+ dsth->protocol == IPPROTO_DCCP)) { |
+ /* for UDP, TCP, SCTP and DCCP source and dest port |
+ follow IPv4 header directly */ |
+ portp = ((u8*)dsth) + dsth->ihl * 4; |
+ |
+ if (use_dest_addr) |
+ portp += sizeof(u16); |
+ } else if (iph->protocol == IPPROTO_ICMP) { |
+ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4); |
+ |
+ /* use icmp identifier as port */ |
+ if (((u8*)&ih) <= end && ( |
+ (use_dest_addr && ( |
+ ih->type == ICMP_ECHOREPLY || |
+ ih->type == ICMP_TIMESTAMPREPLY || |
+ ih->type == ICMP_INFO_REPLY || |
+ ih->type == ICMP_ADDRESSREPLY)) || |
+ (!use_dest_addr && ( |
+ ih->type == ICMP_ECHO || |
+ ih->type == ICMP_TIMESTAMP || |
+ ih->type == ICMP_INFO_REQUEST || |
+ ih->type == ICMP_ADDRESS) |
+ ))) |
+ portp = (u8*)&ih->un.echo.id; |
+ } |
+ |
+ if ((portp && &portp[2] <= end) || psidlen == 0) { |
+ int frombyte = fmr->ip6_prefix_len / 8; |
+ int fromrem = fmr->ip6_prefix_len % 8; |
+ int bytes = sizeof(struct in6_addr) - frombyte; |
+ const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr; |
+ u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len); |
+ u64 t = 0; |
+ |
+ /* extract PSID from port and add it to eabits */ |
+ u16 psidbits = 0; |
+ if (psidlen > 0) { |
+ psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]); |
+ psidbits >>= 16 - psidlen - fmr->offset; |
+ psidbits = (u16)(psidbits << (16 - psidlen)); |
+ eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen)); |
+ } |
+ |
+ /* rewrite destination address */ |
+ *dest = fmr->ip6_prefix; |
+ memcpy(&dest->s6_addr[10], addr, sizeof(*addr)); |
+ dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen)); |
+ |
+ if (bytes > sizeof(u64)) |
+ bytes = sizeof(u64); |
+ |
+ /* insert eabits */ |
+ memcpy(&t, &dest->s6_addr[frombyte], bytes); |
+ t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1) |
+ << (64 - fmr->ea_len - fromrem)); |
+ t = cpu_to_be64(t | (eabits >> fromrem)); |
+ memcpy(&dest->s6_addr[frombyte], &t, bytes); |
+ } |
+} |
+ |
+ |
static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, |
const struct tnl_ptk_info *tpi, |
struct metadata_dst *tun_dst, |
@@ -832,6 +950,27 @@ static int __ip6_tnl_rcv(struct ip6_tnl |
skb_reset_network_header(skb); |
memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); |
+ if (tpi->proto == htons(ETH_P_IP) && tunnel->parms.fmrs && |
+ !ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) { |
+ /* Packet didn't come from BR, so lookup FMR */ |
+ struct __ip6_tnl_fmr *fmr; |
+ struct in6_addr expected = tunnel->parms.raddr; |
+ for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next) |
+ if (ipv6_prefix_equal(&ipv6h->saddr, |
+ &fmr->ip6_prefix, fmr->ip6_prefix_len)) |
+ break; |
+ |
+ /* Check that IPv6 matches IPv4 source to prevent spoofing */ |
+ if (fmr) |
+ ip4ip6_fmr_calc(&expected, ip_hdr(skb), |
+ skb_tail_pointer(skb), fmr, false); |
+ |
+ if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) { |
+ rcu_read_unlock(); |
+ goto drop; |
+ } |
+ } |
+ |
__skb_tunnel_rx(skb, tunnel->dev, tunnel->net); |
err = dscp_ecn_decapsulate(tunnel, ipv6h, skb); |
@@ -962,6 +1101,7 @@ static void init_tel_txopt(struct ipv6_t |
opt->ops.opt_nflen = 8; |
} |
+ |
/** |
* ip6_tnl_addr_conflict - compare packet addresses to tunnel's own |
* @t: the outgoing tunnel device |
@@ -1300,6 +1440,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str |
{ |
struct ip6_tnl *t = netdev_priv(dev); |
struct ipv6hdr *ipv6h; |
+ struct __ip6_tnl_fmr *fmr; |
int encap_limit = -1; |
__u16 offset; |
struct flowi6 fl6; |
@@ -1362,6 +1503,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str |
fl6.flowi6_mark = skb->mark; |
} |
+ /* try to find matching FMR */ |
+ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) { |
+ unsigned mshift = 32 - fmr->ip4_prefix_len; |
+ if (ntohl(fmr->ip4_prefix.s_addr) >> mshift == |
+ ntohl(ip_hdr(skb)->daddr) >> mshift) |
+ break; |
+ } |
+ |
+ /* change dstaddr according to FMR */ |
+ if (fmr) |
+ ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true); |
+ |
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) |
return -1; |
@@ -1489,6 +1642,14 @@ ip6_tnl_change(struct ip6_tnl *t, const |
t->parms.flowinfo = p->flowinfo; |
t->parms.link = p->link; |
t->parms.proto = p->proto; |
+ |
+ while (t->parms.fmrs) { |
+ struct __ip6_tnl_fmr *next = t->parms.fmrs->next; |
+ kfree(t->parms.fmrs); |
+ t->parms.fmrs = next; |
+ } |
+ t->parms.fmrs = p->fmrs; |
+ |
dst_cache_reset(&t->dst_cache); |
ip6_tnl_link_config(t); |
return 0; |
@@ -1527,6 +1688,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_ |
p->flowinfo = u->flowinfo; |
p->link = u->link; |
p->proto = u->proto; |
+ p->fmrs = NULL; |
memcpy(p->name, u->name, sizeof(u->name)); |
} |
@@ -1904,6 +2066,15 @@ static int ip6_tnl_validate(struct nlatt |
return 0; |
} |
+static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = { |
+ [IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) }, |
+ [IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) }, |
+ [IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 }, |
+ [IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 }, |
+ [IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 }, |
+ [IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 } |
+}; |
+ |
static void ip6_tnl_netlink_parms(struct nlattr *data[], |
struct __ip6_tnl_parm *parms) |
{ |
@@ -1938,6 +2109,46 @@ static void ip6_tnl_netlink_parms(struct |
if (data[IFLA_IPTUN_COLLECT_METADATA]) |
parms->collect_md = true; |
+ |
+ if (data[IFLA_IPTUN_FMRS]) { |
+ unsigned rem; |
+ struct nlattr *fmr; |
+ nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) { |
+ struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c; |
+ struct __ip6_tnl_fmr *nfmr; |
+ |
+ nla_parse_nested(fmrd, IFLA_IPTUN_FMR_MAX, |
+ fmr, ip6_tnl_fmr_policy); |
+ |
+ if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL))) |
+ continue; |
+ |
+ nfmr->offset = 6; |
+ |
+ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX])) |
+ nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX], |
+ sizeof(nfmr->ip6_prefix)); |
+ |
+ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX])) |
+ nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX], |
+ sizeof(nfmr->ip4_prefix)); |
+ |
+ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN])) |
+ nfmr->ip6_prefix_len = nla_get_u8(c); |
+ |
+ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN])) |
+ nfmr->ip4_prefix_len = nla_get_u8(c); |
+ |
+ if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN])) |
+ nfmr->ea_len = nla_get_u8(c); |
+ |
+ if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET])) |
+ nfmr->offset = nla_get_u8(c); |
+ |
+ nfmr->next = parms->fmrs; |
+ parms->fmrs = nfmr; |
+ } |
+ } |
} |
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[], |
@@ -2051,6 +2262,12 @@ static void ip6_tnl_dellink(struct net_d |
static size_t ip6_tnl_get_size(const struct net_device *dev) |
{ |
+ const struct ip6_tnl *t = netdev_priv(dev); |
+ struct __ip6_tnl_fmr *c; |
+ int fmrs = 0; |
+ for (c = t->parms.fmrs; c; c = c->next) |
+ ++fmrs; |
+ |
return |
/* IFLA_IPTUN_LINK */ |
nla_total_size(4) + |
@@ -2078,6 +2295,24 @@ static size_t ip6_tnl_get_size(const str |
nla_total_size(2) + |
/* IFLA_IPTUN_COLLECT_METADATA */ |
nla_total_size(0) + |
+ /* IFLA_IPTUN_FMRS */ |
+ nla_total_size(0) + |
+ ( |
+ /* nest */ |
+ nla_total_size(0) + |
+ /* IFLA_IPTUN_FMR_IP6_PREFIX */ |
+ nla_total_size(sizeof(struct in6_addr)) + |
+ /* IFLA_IPTUN_FMR_IP4_PREFIX */ |
+ nla_total_size(sizeof(struct in_addr)) + |
+ /* IFLA_IPTUN_FMR_EA_LEN */ |
+ nla_total_size(1) + |
+ /* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */ |
+ nla_total_size(1) + |
+ /* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */ |
+ nla_total_size(1) + |
+ /* IFLA_IPTUN_FMR_OFFSET */ |
+ nla_total_size(1) |
+ ) * fmrs + |
0; |
} |
@@ -2085,6 +2320,9 @@ static int ip6_tnl_fill_info(struct sk_b |
{ |
struct ip6_tnl *tunnel = netdev_priv(dev); |
struct __ip6_tnl_parm *parm = &tunnel->parms; |
+ struct __ip6_tnl_fmr *c; |
+ int fmrcnt = 0; |
+ struct nlattr *fmrs; |
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || |
nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) || |
@@ -2093,9 +2331,27 @@ static int ip6_tnl_fill_info(struct sk_b |
nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || |
nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || |
nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) || |
- nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto)) |
+ nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) || |
+ !(fmrs = nla_nest_start(skb, IFLA_IPTUN_FMRS))) |
goto nla_put_failure; |
+ for (c = parm->fmrs; c; c = c->next) { |
+ struct nlattr *fmr = nla_nest_start(skb, ++fmrcnt); |
+ if (!fmr || |
+ nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX, |
+ sizeof(c->ip6_prefix), &c->ip6_prefix) || |
+ nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX, |
+ sizeof(c->ip4_prefix), &c->ip4_prefix) || |
+ nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) || |
+ nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) || |
+ nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) || |
+ nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset)) |
+ goto nla_put_failure; |
+ |
+ nla_nest_end(skb, fmr); |
+ } |
+ nla_nest_end(skb, fmrs); |
+ |
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) || |
nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) || |
nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || |
@@ -2133,6 +2389,7 @@ static const struct nla_policy ip6_tnl_p |
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, |
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, |
[IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG }, |
+ [IFLA_IPTUN_FMRS] = { .type = NLA_NESTED }, |
}; |
static struct rtnl_link_ops ip6_link_ops __read_mostly = { |
/branches/18.06.1/target/linux/generic/pending-4.9/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch |
---|
@@ -0,0 +1,255 @@ |
From: Jonas Gorski <jogo@openwrt.org> |
Subject: ipv6: allow rejecting with "source address failed policy" |
RFC6204 L-14 requires rejecting traffic from invalid addresses with |
ICMPv6 Destination Unreachable, Code 5 (Source address failed ingress/ |
egress policy) on the LAN side, so add an appropriate rule for that. |
Signed-off-by: Jonas Gorski <jogo@openwrt.org> |
--- |
include/net/netns/ipv6.h | 1 + |
include/uapi/linux/fib_rules.h | 4 +++ |
include/uapi/linux/rtnetlink.h | 1 + |
net/ipv4/fib_semantics.c | 4 +++ |
net/ipv4/fib_trie.c | 1 + |
net/ipv4/ipmr.c | 1 + |
net/ipv6/fib6_rules.c | 4 +++ |
net/ipv6/ip6mr.c | 2 ++ |
net/ipv6/route.c | 58 +++++++++++++++++++++++++++++++++++++++++- |
9 files changed, 75 insertions(+), 1 deletion(-) |
--- a/include/net/netns/ipv6.h |
+++ b/include/net/netns/ipv6.h |
@@ -66,6 +66,7 @@ struct netns_ipv6 { |
unsigned long ip6_rt_last_gc; |
#ifdef CONFIG_IPV6_MULTIPLE_TABLES |
struct rt6_info *ip6_prohibit_entry; |
+ struct rt6_info *ip6_policy_failed_entry; |
struct rt6_info *ip6_blk_hole_entry; |
struct fib6_table *fib6_local_tbl; |
struct fib_rules_ops *fib6_rules_ops; |
--- a/include/uapi/linux/fib_rules.h |
+++ b/include/uapi/linux/fib_rules.h |
@@ -66,6 +66,10 @@ enum { |
FR_ACT_BLACKHOLE, /* Drop without notification */ |
FR_ACT_UNREACHABLE, /* Drop with ENETUNREACH */ |
FR_ACT_PROHIBIT, /* Drop with EACCES */ |
+ FR_ACT_RES9, |
+ FR_ACT_RES10, |
+ FR_ACT_RES11, |
+ FR_ACT_POLICY_FAILED, /* Drop with EACCES */ |
__FR_ACT_MAX, |
}; |
--- a/include/uapi/linux/rtnetlink.h |
+++ b/include/uapi/linux/rtnetlink.h |
@@ -215,6 +215,7 @@ enum { |
RTN_THROW, /* Not in this table */ |
RTN_NAT, /* Translate this address */ |
RTN_XRESOLVE, /* Use external resolver */ |
+ RTN_POLICY_FAILED, /* Failed ingress/egress policy */ |
__RTN_MAX |
}; |
--- a/net/ipv4/fib_semantics.c |
+++ b/net/ipv4/fib_semantics.c |
@@ -138,6 +138,10 @@ const struct fib_prop fib_props[RTN_MAX |
.error = -EINVAL, |
.scope = RT_SCOPE_NOWHERE, |
}, |
+ [RTN_POLICY_FAILED] = { |
+ .error = -EACCES, |
+ .scope = RT_SCOPE_UNIVERSE, |
+ }, |
}; |
static void rt_fibinfo_free(struct rtable __rcu **rtp) |
--- a/net/ipv4/fib_trie.c |
+++ b/net/ipv4/fib_trie.c |
@@ -2406,6 +2406,7 @@ static const char *const rtn_type_names[ |
[RTN_THROW] = "THROW", |
[RTN_NAT] = "NAT", |
[RTN_XRESOLVE] = "XRESOLVE", |
+ [RTN_POLICY_FAILED] = "POLICY_FAILED", |
}; |
static inline const char *rtn_type(char *buf, size_t len, unsigned int t) |
--- a/net/ipv4/ipmr.c |
+++ b/net/ipv4/ipmr.c |
@@ -159,6 +159,7 @@ static int ipmr_rule_action(struct fib_r |
case FR_ACT_UNREACHABLE: |
return -ENETUNREACH; |
case FR_ACT_PROHIBIT: |
+ case FR_ACT_POLICY_FAILED: |
return -EACCES; |
case FR_ACT_BLACKHOLE: |
default: |
--- a/net/ipv6/fib6_rules.c |
+++ b/net/ipv6/fib6_rules.c |
@@ -77,6 +77,10 @@ static int fib6_rule_action(struct fib_r |
err = -EACCES; |
rt = net->ipv6.ip6_prohibit_entry; |
goto discard_pkt; |
+ case FR_ACT_POLICY_FAILED: |
+ err = -EACCES; |
+ rt = net->ipv6.ip6_policy_failed_entry; |
+ goto discard_pkt; |
} |
tb_id = fib_rule_get_table(rule, arg); |
--- a/net/ipv6/ip6mr.c |
+++ b/net/ipv6/ip6mr.c |
@@ -169,6 +169,8 @@ static int ip6mr_rule_action(struct fib_ |
return -ENETUNREACH; |
case FR_ACT_PROHIBIT: |
return -EACCES; |
+ case FR_ACT_POLICY_FAILED: |
+ return -EACCES; |
case FR_ACT_BLACKHOLE: |
default: |
return -EINVAL; |
--- a/net/ipv6/route.c |
+++ b/net/ipv6/route.c |
@@ -91,6 +91,8 @@ static int ip6_pkt_discard(struct sk_bu |
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
static int ip6_pkt_prohibit(struct sk_buff *skb); |
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
+static int ip6_pkt_policy_failed(struct sk_buff *skb); |
+static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
static void ip6_link_failure(struct sk_buff *skb); |
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, |
struct sk_buff *skb, u32 mtu); |
@@ -300,6 +302,21 @@ static const struct rt6_info ip6_prohibi |
.rt6i_ref = ATOMIC_INIT(1), |
}; |
+static const struct rt6_info ip6_policy_failed_entry_template = { |
+ .dst = { |
+ .__refcnt = ATOMIC_INIT(1), |
+ .__use = 1, |
+ .obsolete = DST_OBSOLETE_FORCE_CHK, |
+ .error = -EACCES, |
+ .input = ip6_pkt_policy_failed, |
+ .output = ip6_pkt_policy_failed_out, |
+ }, |
+ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
+ .rt6i_protocol = RTPROT_KERNEL, |
+ .rt6i_metric = ~(u32) 0, |
+ .rt6i_ref = ATOMIC_INIT(1), |
+}; |
+ |
static const struct rt6_info ip6_blk_hole_entry_template = { |
.dst = { |
.__refcnt = ATOMIC_INIT(1), |
@@ -1970,6 +1987,11 @@ static struct rt6_info *ip6_route_info_c |
rt->dst.output = ip6_pkt_prohibit_out; |
rt->dst.input = ip6_pkt_prohibit; |
break; |
+ case RTN_POLICY_FAILED: |
+ rt->dst.error = -EACCES; |
+ rt->dst.output = ip6_pkt_policy_failed_out; |
+ rt->dst.input = ip6_pkt_policy_failed; |
+ break; |
case RTN_THROW: |
case RTN_UNREACHABLE: |
default: |
@@ -2613,6 +2635,17 @@ static int ip6_pkt_prohibit_out(struct n |
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); |
} |
+static int ip6_pkt_policy_failed(struct sk_buff *skb) |
+{ |
+ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_INNOROUTES); |
+} |
+ |
+static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb) |
+{ |
+ skb->dev = skb_dst(skb)->dev; |
+ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_OUTNOROUTES); |
+} |
+ |
/* |
* Allocate a dst for local (unicast / anycast) address. |
*/ |
@@ -2850,7 +2883,8 @@ static int rtm_to_fib6_config(struct sk_ |
if (rtm->rtm_type == RTN_UNREACHABLE || |
rtm->rtm_type == RTN_BLACKHOLE || |
rtm->rtm_type == RTN_PROHIBIT || |
- rtm->rtm_type == RTN_THROW) |
+ rtm->rtm_type == RTN_THROW || |
+ rtm->rtm_type == RTN_POLICY_FAILED) |
cfg->fc_flags |= RTF_REJECT; |
if (rtm->rtm_type == RTN_LOCAL) |
@@ -3222,6 +3256,9 @@ static int rt6_fill_node(struct net *net |
case -EACCES: |
rtm->rtm_type = RTN_PROHIBIT; |
break; |
+ case -EPERM: |
+ rtm->rtm_type = RTN_POLICY_FAILED; |
+ break; |
case -EAGAIN: |
rtm->rtm_type = RTN_THROW; |
break; |
@@ -3498,6 +3535,8 @@ static int ip6_route_dev_notify(struct n |
#ifdef CONFIG_IPV6_MULTIPLE_TABLES |
net->ipv6.ip6_prohibit_entry->dst.dev = dev; |
net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); |
+ net->ipv6.ip6_policy_failed_entry->dst.dev = dev; |
+ net->ipv6.ip6_policy_failed_entry->rt6i_idev = in6_dev_get(dev); |
net->ipv6.ip6_blk_hole_entry->dst.dev = dev; |
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); |
#endif |
@@ -3509,6 +3548,7 @@ static int ip6_route_dev_notify(struct n |
in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); |
#ifdef CONFIG_IPV6_MULTIPLE_TABLES |
in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); |
+ in6_dev_put(net->ipv6.ip6_policy_failed_entry->rt6i_idev); |
in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev); |
#endif |
} |
@@ -3724,6 +3764,17 @@ static int __net_init ip6_route_net_init |
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; |
dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst, |
ip6_template_metrics, true); |
+ |
+ net->ipv6.ip6_policy_failed_entry = |
+ kmemdup(&ip6_policy_failed_entry_template, |
+ sizeof(*net->ipv6.ip6_policy_failed_entry), GFP_KERNEL); |
+ if (!net->ipv6.ip6_policy_failed_entry) |
+ goto out_ip6_blk_hole_entry; |
+ net->ipv6.ip6_policy_failed_entry->dst.path = |
+ (struct dst_entry *)net->ipv6.ip6_policy_failed_entry; |
+ net->ipv6.ip6_policy_failed_entry->dst.ops = &net->ipv6.ip6_dst_ops; |
+ dst_init_metrics(&net->ipv6.ip6_policy_failed_entry->dst, |
+ ip6_template_metrics, true); |
#endif |
net->ipv6.sysctl.flush_delay = 0; |
@@ -3742,6 +3793,8 @@ out: |
return ret; |
#ifdef CONFIG_IPV6_MULTIPLE_TABLES |
+out_ip6_blk_hole_entry: |
+ kfree(net->ipv6.ip6_blk_hole_entry); |
out_ip6_prohibit_entry: |
kfree(net->ipv6.ip6_prohibit_entry); |
out_ip6_null_entry: |
@@ -3759,6 +3812,7 @@ static void __net_exit ip6_route_net_exi |
#ifdef CONFIG_IPV6_MULTIPLE_TABLES |
kfree(net->ipv6.ip6_prohibit_entry); |
kfree(net->ipv6.ip6_blk_hole_entry); |
+ kfree(net->ipv6.ip6_policy_failed_entry); |
#endif |
dst_entries_destroy(&net->ipv6.ip6_dst_ops); |
} |
@@ -3832,6 +3886,9 @@ void __init ip6_route_init_special_entri |
init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; |
init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
+ init_net.ipv6.ip6_policy_failed_entry->dst.dev = init_net.loopback_dev; |
+ init_net.ipv6.ip6_policy_failed_entry->rt6i_idev = |
+ in6_dev_get(init_net.loopback_dev); |
#endif |
} |
/branches/18.06.1/target/linux/generic/pending-4.9/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch |
---|
@@ -0,0 +1,50 @@ |
From: Jonas Gorski <jogo@openwrt.org> |
Subject: net: provide defines for _POLICY_FAILED until all code is updated |
Upstream introduced ICMPV6_POLICY_FAIL for code 5 of destination |
unreachable, conflicting with our name. |
Add appropriate defines to allow our code to build with the new |
name until we have updated our local patches for older kernels |
and userspace packages. |
Signed-off-by: Jonas Gorski <jogo@openwrt.org> |
--- |
include/uapi/linux/fib_rules.h | 2 ++ |
include/uapi/linux/icmpv6.h | 2 ++ |
include/uapi/linux/rtnetlink.h | 2 ++ |
3 files changed, 6 insertions(+) |
--- a/include/uapi/linux/fib_rules.h |
+++ b/include/uapi/linux/fib_rules.h |
@@ -73,6 +73,8 @@ enum { |
__FR_ACT_MAX, |
}; |
+#define FR_ACT_FAILED_POLICY FR_ACT_POLICY_FAILED |
+ |
#define FR_ACT_MAX (__FR_ACT_MAX - 1) |
#endif |
--- a/include/uapi/linux/icmpv6.h |
+++ b/include/uapi/linux/icmpv6.h |
@@ -118,6 +118,8 @@ struct icmp6hdr { |
#define ICMPV6_POLICY_FAIL 5 |
#define ICMPV6_REJECT_ROUTE 6 |
+#define ICMPV6_FAILED_POLICY ICMPV6_POLICY_FAIL |
+ |
/* |
* Codes for Time Exceeded |
*/ |
--- a/include/uapi/linux/rtnetlink.h |
+++ b/include/uapi/linux/rtnetlink.h |
@@ -219,6 +219,8 @@ enum { |
__RTN_MAX |
}; |
+#define RTN_FAILED_POLICY RTN_POLICY_FAILED |
+ |
#define RTN_MAX (__RTN_MAX - 1) |
/branches/18.06.1/target/linux/generic/pending-4.9/680-NET-skip-GRO-for-foreign-MAC-addresses.patch |
---|
@@ -0,0 +1,154 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: net: replace GRO optimization patch with a new one that supports VLANs/bridges with different MAC addresses |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
include/linux/netdevice.h | 2 ++ |
include/linux/skbuff.h | 3 ++- |
net/core/dev.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++ |
net/ethernet/eth.c | 18 +++++++++++++++++- |
4 files changed, 69 insertions(+), 2 deletions(-) |
--- a/include/linux/netdevice.h |
+++ b/include/linux/netdevice.h |
@@ -1752,6 +1752,8 @@ struct net_device { |
struct netdev_hw_addr_list mc; |
struct netdev_hw_addr_list dev_addrs; |
+ unsigned char local_addr_mask[MAX_ADDR_LEN]; |
+ |
#ifdef CONFIG_SYSFS |
struct kset *queues_kset; |
#endif |
--- a/include/linux/skbuff.h |
+++ b/include/linux/skbuff.h |
@@ -747,7 +747,8 @@ struct sk_buff { |
#ifdef CONFIG_NET_SWITCHDEV |
__u8 offload_fwd_mark:1; |
#endif |
- /* 2, 4 or 5 bit hole */ |
+ __u8 gro_skip:1; |
+ /* 1, 3 or 4 bit hole */ |
#ifdef CONFIG_NET_SCHED |
__u16 tc_index; /* traffic control index */ |
--- a/net/core/dev.c |
+++ b/net/core/dev.c |
@@ -4572,6 +4572,9 @@ static enum gro_result dev_gro_receive(s |
enum gro_result ret; |
int grow; |
+ if (skb->gro_skip) |
+ goto normal; |
+ |
if (!(skb->dev->features & NETIF_F_GRO)) |
goto normal; |
@@ -5864,6 +5867,48 @@ static void __netdev_adjacent_dev_unlink |
&upper_dev->adj_list.lower); |
} |
+static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr, |
+ struct net_device *dev) |
+{ |
+ int i; |
+ |
+ for (i = 0; i < dev->addr_len; i++) |
+ mask[i] |= addr[i] ^ dev->dev_addr[i]; |
+} |
+ |
+static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev, |
+ struct net_device *lower) |
+{ |
+ struct net_device *cur; |
+ struct list_head *iter; |
+ |
+ netdev_for_each_upper_dev_rcu(dev, cur, iter) { |
+ __netdev_addr_mask(mask, cur->dev_addr, lower); |
+ __netdev_upper_mask(mask, cur, lower); |
+ } |
+} |
+ |
+static void __netdev_update_addr_mask(struct net_device *dev) |
+{ |
+ unsigned char mask[MAX_ADDR_LEN]; |
+ struct net_device *cur; |
+ struct list_head *iter; |
+ |
+ memset(mask, 0, sizeof(mask)); |
+ __netdev_upper_mask(mask, dev, dev); |
+ memcpy(dev->local_addr_mask, mask, dev->addr_len); |
+ |
+ netdev_for_each_lower_dev(dev, cur, iter) |
+ __netdev_update_addr_mask(cur); |
+} |
+ |
+static void netdev_update_addr_mask(struct net_device *dev) |
+{ |
+ rcu_read_lock(); |
+ __netdev_update_addr_mask(dev); |
+ rcu_read_unlock(); |
+} |
+ |
static int __netdev_upper_dev_link(struct net_device *dev, |
struct net_device *upper_dev, bool master, |
void *upper_priv, void *upper_info) |
@@ -5936,6 +5981,7 @@ static int __netdev_upper_dev_link(struc |
goto rollback_lower_mesh; |
} |
+ netdev_update_addr_mask(dev); |
ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, |
&changeupper_info.info); |
ret = notifier_to_errno(ret); |
@@ -6062,6 +6108,7 @@ void netdev_upper_dev_unlink(struct net_ |
list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) |
__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr); |
+ netdev_update_addr_mask(dev); |
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, |
&changeupper_info.info); |
} |
@@ -6664,6 +6711,7 @@ int dev_set_mac_address(struct net_devic |
if (err) |
return err; |
dev->addr_assign_type = NET_ADDR_SET; |
+ netdev_update_addr_mask(dev); |
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); |
add_device_randomness(dev->dev_addr, dev->addr_len); |
return 0; |
--- a/net/ethernet/eth.c |
+++ b/net/ethernet/eth.c |
@@ -143,6 +143,18 @@ u32 eth_get_headlen(void *data, unsigned |
} |
EXPORT_SYMBOL(eth_get_headlen); |
+static inline bool |
+eth_check_local_mask(const void *addr1, const void *addr2, const void *mask) |
+{ |
+ const u16 *a1 = addr1; |
+ const u16 *a2 = addr2; |
+ const u16 *m = mask; |
+ |
+ return (((a1[0] ^ a2[0]) & ~m[0]) | |
+ ((a1[1] ^ a2[1]) & ~m[1]) | |
+ ((a1[2] ^ a2[2]) & ~m[2])); |
+} |
+ |
/** |
* eth_type_trans - determine the packet's protocol ID. |
* @skb: received socket data |
@@ -171,8 +183,12 @@ __be16 eth_type_trans(struct sk_buff *sk |
skb->pkt_type = PACKET_MULTICAST; |
} |
else if (unlikely(!ether_addr_equal_64bits(eth->h_dest, |
- dev->dev_addr))) |
+ dev->dev_addr))) { |
skb->pkt_type = PACKET_OTHERHOST; |
+ if (eth_check_local_mask(eth->h_dest, dev->dev_addr, |
+ dev->local_addr_mask)) |
+ skb->gro_skip = 1; |
+ } |
/* |
* Some variants of DSA tagging don't have an ethertype field |
/branches/18.06.1/target/linux/generic/pending-4.9/681-NET-add-of_get_mac_address_mtd.patch |
---|
@@ -0,0 +1,127 @@ |
From: John Crispin <blogic@openwrt.org> |
Subject: NET: add mtd-mac-address support to of_get_mac_address() |
Many embedded devices have information such as mac addresses stored inside mtd |
devices. This patch allows us to add a property inside a node describing a |
network interface. The new property points at an mtd partition with an offset |
where the mac address can be found. |
Signed-off-by: John Crispin <blogic@openwrt.org> |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/of/of_net.c | 37 +++++++++++++++++++++++++++++++++++++ |
include/linux/of_net.h | 1 + |
2 files changed, 38 insertions(+) |
--- a/drivers/of/of_net.c |
+++ b/drivers/of/of_net.c |
@@ -10,6 +10,7 @@ |
#include <linux/of_net.h> |
#include <linux/phy.h> |
#include <linux/export.h> |
+#include <linux/mtd/mtd.h> |
/** |
* of_get_phy_mode - Get phy mode for given device_node |
@@ -38,7 +39,7 @@ int of_get_phy_mode(struct device_node * |
} |
EXPORT_SYMBOL_GPL(of_get_phy_mode); |
-static const void *of_get_mac_addr(struct device_node *np, const char *name) |
+static void *of_get_mac_addr(struct device_node *np, const char *name) |
{ |
struct property *pp = of_find_property(np, name, NULL); |
@@ -47,6 +48,73 @@ static const void *of_get_mac_addr(struc |
return NULL; |
} |
+static const void *of_get_mac_address_mtd(struct device_node *np) |
+{ |
+#ifdef CONFIG_MTD |
+ struct device_node *mtd_np = NULL; |
+ struct property *prop; |
+ size_t retlen; |
+ int size, ret; |
+ struct mtd_info *mtd; |
+ const char *part; |
+ const __be32 *list; |
+ phandle phandle; |
+ u32 mac_inc = 0; |
+ u8 mac[ETH_ALEN]; |
+ void *addr; |
+ |
+ list = of_get_property(np, "mtd-mac-address", &size); |
+ if (!list || (size != (2 * sizeof(*list)))) |
+ return NULL; |
+ |
+ phandle = be32_to_cpup(list++); |
+ if (phandle) |
+ mtd_np = of_find_node_by_phandle(phandle); |
+ |
+ if (!mtd_np) |
+ return NULL; |
+ |
+ part = of_get_property(mtd_np, "label", NULL); |
+ if (!part) |
+ part = mtd_np->name; |
+ |
+ mtd = get_mtd_device_nm(part); |
+ if (IS_ERR(mtd)) |
+ return NULL; |
+ |
+ ret = mtd_read(mtd, be32_to_cpup(list), 6, &retlen, mac); |
+ put_mtd_device(mtd); |
+ |
+ if (!of_property_read_u32(np, "mtd-mac-address-increment", &mac_inc)) |
+ mac[5] += mac_inc; |
+ |
+ if (!is_valid_ether_addr(mac)) |
+ return NULL; |
+ |
+ addr = of_get_mac_addr(np, "mac-address"); |
+ if (addr) { |
+ memcpy(addr, mac, ETH_ALEN); |
+ return addr; |
+ } |
+ |
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL); |
+ if (!prop) |
+ return NULL; |
+ |
+ prop->name = "mac-address"; |
+ prop->length = ETH_ALEN; |
+ prop->value = kmemdup(mac, ETH_ALEN, GFP_KERNEL); |
+ if (!prop->value || of_add_property(np, prop)) |
+ goto free; |
+ |
+ return prop->value; |
+free: |
+ kfree(prop->value); |
+ kfree(prop); |
+#endif |
+ return NULL; |
+} |
+ |
/** |
* Search the device tree for the best MAC address to use. 'mac-address' is |
* checked first, because that is supposed to contain to "most recent" MAC |
@@ -64,11 +132,18 @@ static const void *of_get_mac_addr(struc |
* addresses. Some older U-Boots only initialized 'local-mac-address'. In |
* this case, the real MAC is in 'local-mac-address', and 'mac-address' exists |
* but is all zeros. |
+ * |
+ * If a mtd-mac-address property exists, try to fetch the MAC address from the |
+ * specified mtd device, and store it as a 'mac-address' property |
*/ |
const void *of_get_mac_address(struct device_node *np) |
{ |
const void *addr; |
+ addr = of_get_mac_address_mtd(np); |
+ if (addr) |
+ return addr; |
+ |
addr = of_get_mac_addr(np, "mac-address"); |
if (addr) |
return addr; |
/branches/18.06.1/target/linux/generic/pending-4.9/701-phy_extension.patch |
---|
@@ -0,0 +1,72 @@ |
From: John Crispin <john@phrozen.org> |
Subject: net: phy: add phy_ethtool_ioctl() |
Signed-off-by: John Crispin <john@phrozen.org> |
--- |
drivers/net/phy/phy.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ |
include/linux/phy.h | 1 + |
2 files changed, 45 insertions(+) |
--- a/drivers/net/phy/phy.c |
+++ b/drivers/net/phy/phy.c |
@@ -472,6 +472,50 @@ int phy_ethtool_ksettings_get(struct phy |
} |
EXPORT_SYMBOL(phy_ethtool_ksettings_get); |
+int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr) |
+{ |
+ u32 cmd; |
+ int tmp; |
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET }; |
+ struct ethtool_value edata = { ETHTOOL_GLINK }; |
+ |
+ if (get_user(cmd, (u32 *) useraddr)) |
+ return -EFAULT; |
+ |
+ switch (cmd) { |
+ case ETHTOOL_GSET: |
+ phy_ethtool_gset(phydev, &ecmd); |
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd))) |
+ return -EFAULT; |
+ return 0; |
+ |
+ case ETHTOOL_SSET: |
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd))) |
+ return -EFAULT; |
+ return phy_ethtool_sset(phydev, &ecmd); |
+ |
+ case ETHTOOL_NWAY_RST: |
+ /* if autoneg is off, it's an error */ |
+ tmp = phy_read(phydev, MII_BMCR); |
+ if (tmp & BMCR_ANENABLE) { |
+ tmp |= (BMCR_ANRESTART); |
+ phy_write(phydev, MII_BMCR, tmp); |
+ return 0; |
+ } |
+ return -EINVAL; |
+ |
+ case ETHTOOL_GLINK: |
+ edata.data = (phy_read(phydev, |
+ MII_BMSR) & BMSR_LSTATUS) ? 1 : 0; |
+ if (copy_to_user(useraddr, &edata, sizeof(edata))) |
+ return -EFAULT; |
+ return 0; |
+ } |
+ |
+ return -EOPNOTSUPP; |
+} |
+EXPORT_SYMBOL(phy_ethtool_ioctl); |
+ |
/** |
* phy_mii_ioctl - generic PHY MII ioctl interface |
* @phydev: the phy_device struct |
--- a/include/linux/phy.h |
+++ b/include/linux/phy.h |
@@ -827,6 +827,7 @@ int phy_ethtool_ksettings_get(struct phy |
struct ethtool_link_ksettings *cmd); |
int phy_ethtool_ksettings_set(struct phy_device *phydev, |
const struct ethtool_link_ksettings *cmd); |
+int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr); |
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); |
int phy_start_interrupts(struct phy_device *phydev); |
void phy_print_status(struct phy_device *phydev); |
/branches/18.06.1/target/linux/generic/pending-4.9/703-phy-add-detach-callback-to-struct-phy_driver.patch |
---|
@@ -0,0 +1,38 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: generic: add detach callback to struct phy_driver |
lede-commit: fe61fc2d7d0b3fb348b502f68f98243b3ddf5867 |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/net/phy/phy_device.c | 3 +++ |
include/linux/phy.h | 6 ++++++ |
2 files changed, 9 insertions(+) |
--- a/drivers/net/phy/phy_device.c |
+++ b/drivers/net/phy/phy_device.c |
@@ -998,6 +998,9 @@ void phy_detach(struct phy_device *phyde |
struct mii_bus *bus; |
int i; |
+ if (phydev->drv && phydev->drv->detach) |
+ phydev->drv->detach(phydev); |
+ |
phydev->attached_dev->phydev = NULL; |
phydev->attached_dev = NULL; |
phy_suspend(phydev); |
--- a/include/linux/phy.h |
+++ b/include/linux/phy.h |
@@ -507,6 +507,12 @@ struct phy_driver { |
*/ |
int (*did_interrupt)(struct phy_device *phydev); |
+ /* |
+ * Called before an ethernet device is detached |
+ * from the PHY. |
+ */ |
+ void (*detach)(struct phy_device *phydev); |
+ |
/* Clears up any memory if needed */ |
void (*remove)(struct phy_device *phydev); |
/branches/18.06.1/target/linux/generic/pending-4.9/734-net-phy-at803x-allow-to-configure-via-pdata.patch |
---|
@@ -0,0 +1,142 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: net: phy: allow to configure AR803x PHYs via platform data |
Add a patch for the at803x phy driver, in order to be able |
to configure some register settings via platform data. |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/net/phy/at803x.c | 56 ++++++++++++++++++++++++++++++++ |
include/linux/platform_data/phy-at803x.h | 11 +++++++ |
2 files changed, 67 insertions(+) |
create mode 100644 include/linux/platform_data/phy-at803x.h |
--- a/drivers/net/phy/at803x.c |
+++ b/drivers/net/phy/at803x.c |
@@ -12,12 +12,14 @@ |
*/ |
#include <linux/phy.h> |
+#include <linux/mdio.h> |
#include <linux/module.h> |
#include <linux/string.h> |
#include <linux/netdevice.h> |
#include <linux/etherdevice.h> |
#include <linux/of_gpio.h> |
#include <linux/gpio/consumer.h> |
+#include <linux/platform_data/phy-at803x.h> |
#define AT803X_INTR_ENABLE 0x12 |
#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15) |
@@ -45,6 +47,11 @@ |
#define AT803X_REG_CHIP_CONFIG 0x1f |
#define AT803X_BT_BX_REG_SEL 0x8000 |
+#define AT803X_PCS_SMART_EEE_CTRL3 0x805D |
+#define AT803X_SMART_EEE_CTRL3_LPI_TX_DELAY_SEL_MASK 0x3 |
+#define AT803X_SMART_EEE_CTRL3_LPI_TX_DELAY_SEL_SHIFT 12 |
+#define AT803X_SMART_EEE_CTRL3_LPI_EN BIT(8) |
+ |
#define AT803X_DEBUG_ADDR 0x1D |
#define AT803X_DEBUG_DATA 0x1E |
@@ -72,6 +79,7 @@ MODULE_LICENSE("GPL"); |
struct at803x_priv { |
bool phy_reset:1; |
struct gpio_desc *gpiod_reset; |
+ int prev_speed; |
}; |
struct at803x_context { |
@@ -276,8 +284,16 @@ does_not_require_reset_workaround: |
return 0; |
} |
+static void at803x_disable_smarteee(struct phy_device *phydev) |
+{ |
+ phy_write_mmd(phydev, MDIO_MMD_PCS, AT803X_PCS_SMART_EEE_CTRL3, |
+ 1 << AT803X_SMART_EEE_CTRL3_LPI_TX_DELAY_SEL_SHIFT); |
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); |
+} |
+ |
static int at803x_config_init(struct phy_device *phydev) |
{ |
+ struct at803x_platform_data *pdata; |
int ret; |
ret = genphy_config_init(phydev); |
@@ -298,6 +314,26 @@ static int at803x_config_init(struct phy |
return ret; |
} |
+ pdata = dev_get_platdata(&phydev->mdio.dev); |
+ if (pdata) { |
+ if (pdata->disable_smarteee) |
+ at803x_disable_smarteee(phydev); |
+ |
+ if (pdata->enable_rgmii_rx_delay) |
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, |
+ AT803X_DEBUG_RX_CLK_DLY_EN); |
+ else |
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, |
+ AT803X_DEBUG_RX_CLK_DLY_EN, 0); |
+ |
+ if (pdata->enable_rgmii_tx_delay) |
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0, |
+ AT803X_DEBUG_TX_CLK_DLY_EN); |
+ else |
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, |
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0); |
+ } |
+ |
return 0; |
} |
@@ -335,6 +371,8 @@ static int at803x_config_intr(struct phy |
static void at803x_link_change_notify(struct phy_device *phydev) |
{ |
struct at803x_priv *priv = phydev->priv; |
+ struct at803x_platform_data *pdata; |
+ pdata = dev_get_platdata(&phydev->mdio.dev); |
/* |
* Conduct a hardware reset for AT8030/2 every time a link loss is |
@@ -363,6 +401,24 @@ static void at803x_link_change_notify(st |
} else { |
priv->phy_reset = false; |
} |
+ if (pdata && pdata->fixup_rgmii_tx_delay && |
+ phydev->speed != priv->prev_speed) { |
+ switch (phydev->speed) { |
+ case SPEED_10: |
+ case SPEED_100: |
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0, |
+ AT803X_DEBUG_TX_CLK_DLY_EN); |
+ break; |
+ case SPEED_1000: |
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, |
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0); |
+ break; |
+ default: |
+ break; |
+ } |
+ |
+ priv->prev_speed = phydev->speed; |
+ } |
} |
static int at803x_aneg_done(struct phy_device *phydev) |
--- /dev/null |
+++ b/include/linux/platform_data/phy-at803x.h |
@@ -0,0 +1,11 @@ |
+#ifndef _PHY_AT803X_PDATA_H |
+#define _PHY_AT803X_PDATA_H |
+ |
+struct at803x_platform_data { |
+ int disable_smarteee:1; |
+ int enable_rgmii_tx_delay:1; |
+ int enable_rgmii_rx_delay:1; |
+ int fixup_rgmii_tx_delay:1; |
+}; |
+ |
+#endif /* _PHY_AT803X_PDATA_H */ |
/branches/18.06.1/target/linux/generic/pending-4.9/735-net-phy-at803x-fix-at8033-sgmii-mode.patch |
---|
@@ -0,0 +1,51 @@ |
From: Roman Yeryomin <roman@advem.lv> |
Subject: kernel: add at803x fix for sgmii mode |
Some (possibly broken) bootloaders incorrectly initialize at8033 |
phy. This patch enables sgmii autonegotiation mode. |
[john@phrozen.org: felix added this to his upstream queue] |
Signed-off-by: Roman Yeryomin <roman@advem.lv> |
--- |
drivers/net/phy/at803x.c | 25 +++++++++++++++++++++++++ |
1 file changed, 25 insertions(+) |
--- a/drivers/net/phy/at803x.c |
+++ b/drivers/net/phy/at803x.c |
@@ -46,6 +46,7 @@ |
#define AT803X_FUNC_DATA 0x4003 |
#define AT803X_REG_CHIP_CONFIG 0x1f |
#define AT803X_BT_BX_REG_SEL 0x8000 |
+#define AT803X_SGMII_ANEG_EN 0x1000 |
#define AT803X_PCS_SMART_EEE_CTRL3 0x805D |
#define AT803X_SMART_EEE_CTRL3_LPI_TX_DELAY_SEL_MASK 0x3 |
@@ -295,6 +296,27 @@ static int at803x_config_init(struct phy |
{ |
struct at803x_platform_data *pdata; |
int ret; |
+ u32 v; |
+ |
+ if (phydev->drv->phy_id == ATH8031_PHY_ID && |
+ phydev->interface == PHY_INTERFACE_MODE_SGMII) |
+ { |
+ v = phy_read(phydev, AT803X_REG_CHIP_CONFIG); |
+ /* select SGMII/fiber page */ |
+ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG, |
+ v & ~AT803X_BT_BX_REG_SEL); |
+ if (ret) |
+ return ret; |
+ /* enable SGMII autonegotiation */ |
+ ret = phy_write(phydev, MII_BMCR, AT803X_SGMII_ANEG_EN); |
+ if (ret) |
+ return ret; |
+ /* select copper page */ |
+ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG, |
+ v | AT803X_BT_BX_REG_SEL); |
+ if (ret) |
+ return ret; |
+ } |
ret = genphy_config_init(phydev); |
if (ret < 0) |
/branches/18.06.1/target/linux/generic/pending-4.9/810-pci_disable_common_quirks.patch |
---|
@@ -0,0 +1,60 @@ |
From: Gabor Juhos <juhosg@openwrt.org> |
Subject: debloat: add kernel config option for disabling common PCI quirks |
Signed-off-by: Gabor Juhos <juhosg@openwrt.org> |
--- |
drivers/pci/Kconfig | 6 ++++++ |
drivers/pci/quirks.c | 6 ++++++ |
2 files changed, 12 insertions(+) |
--- a/drivers/pci/Kconfig |
+++ b/drivers/pci/Kconfig |
@@ -71,6 +71,12 @@ config XEN_PCIDEV_FRONTEND |
The PCI device frontend driver allows the kernel to import arbitrary |
PCI devices from a PCI backend to support PCI driver domains. |
+config PCI_DISABLE_COMMON_QUIRKS |
+ bool "PCI disable common quirks" |
+ depends on PCI |
+ help |
+ If you don't know what to do here, say N. |
+ |
config HT_IRQ |
bool "Interrupts on hypertransport devices" |
default y |
--- a/drivers/pci/quirks.c |
+++ b/drivers/pci/quirks.c |
@@ -41,6 +41,7 @@ static void quirk_mmio_always_on(struct |
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, |
PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); |
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS |
/* The Mellanox Tavor device gives false positive parity errors |
* Mark this device with a broken_parity_status, to allow |
* PCI scanning code to "skip" this now blacklisted device. |
@@ -3038,6 +3039,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I |
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); |
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); |
+#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ |
/* |
* Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To |
@@ -3094,6 +3096,8 @@ static void fixup_debug_report(struct pc |
} |
} |
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS |
+ |
/* |
* Some BIOS implementations leave the Intel GPU interrupts enabled, |
* even though no one is handling them (f.e. i915 driver is never loaded). |
@@ -3132,6 +3136,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN |
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); |
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); |
+#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ |
+ |
/* |
* PCI devices which are on Intel chips can skip the 10ms delay |
* before entering D3 mode. |
/branches/18.06.1/target/linux/generic/pending-4.9/811-pci_disable_usb_common_quirks.patch |
---|
@@ -0,0 +1,110 @@ |
From: Felix Fietkau <nbd@nbd.name> |
Subject: debloat: disable common USB quirks |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
drivers/usb/host/pci-quirks.c | 16 ++++++++++++++++ |
drivers/usb/host/pci-quirks.h | 18 +++++++++++++++++- |
include/linux/usb/hcd.h | 7 +++++++ |
3 files changed, 40 insertions(+), 1 deletion(-) |
--- a/drivers/usb/host/pci-quirks.c |
+++ b/drivers/usb/host/pci-quirks.c |
@@ -107,6 +107,8 @@ struct amd_chipset_type { |
u8 rev; |
}; |
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS |
+ |
static struct amd_chipset_info { |
struct pci_dev *nb_dev; |
struct pci_dev *smbus_dev; |
@@ -511,6 +513,10 @@ void usb_amd_dev_put(void) |
} |
EXPORT_SYMBOL_GPL(usb_amd_dev_put); |
+#endif /* CONFIG_PCI_DISABLE_COMMON_QUIRKS */ |
+ |
+#if IS_ENABLED(CONFIG_USB_UHCI_HCD) |
+ |
/* |
* Make sure the controller is completely inactive, unable to |
* generate interrupts or do DMA. |
@@ -590,8 +596,17 @@ reset_needed: |
uhci_reset_hc(pdev, base); |
return 1; |
} |
+#else |
+int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base) |
+{ |
+ return 0; |
+} |
+ |
+#endif |
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc); |
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS |
+ |
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask) |
{ |
u16 cmd; |
@@ -1158,3 +1173,4 @@ static void quirk_usb_early_handoff(stru |
} |
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, |
PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); |
+#endif |
--- a/drivers/usb/host/pci-quirks.h |
+++ b/drivers/usb/host/pci-quirks.h |
@@ -4,6 +4,9 @@ |
#ifdef CONFIG_PCI |
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); |
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); |
+#endif /* CONFIG_PCI */ |
+ |
+#if defined(CONFIG_PCI) && !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS) |
int usb_amd_find_chipset_info(void); |
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); |
bool usb_amd_hang_symptom_quirk(void); |
@@ -17,12 +20,25 @@ void usb_disable_xhci_ports(struct pci_d |
void sb800_prefetch(struct device *dev, int on); |
#else |
struct pci_dev; |
+static inline int usb_amd_find_chipset_info(void) |
+{ |
+ return 0; |
+} |
+static inline bool usb_amd_hang_symptom_quirk(void) |
+{ |
+ return false; |
+} |
+static inline bool usb_amd_prefetch_quirk(void) |
+{ |
+ return false; |
+} |
static inline void usb_amd_quirk_pll_disable(void) {} |
static inline void usb_amd_quirk_pll_enable(void) {} |
static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} |
static inline void usb_amd_dev_put(void) {} |
static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} |
static inline void sb800_prefetch(struct device *dev, int on) {} |
+static inline void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) {} |
#endif /* CONFIG_PCI */ |
#endif /* __LINUX_USB_PCI_QUIRKS_H */ |
--- a/include/linux/usb/hcd.h |
+++ b/include/linux/usb/hcd.h |
@@ -461,7 +461,14 @@ extern int usb_hcd_pci_probe(struct pci_ |
extern void usb_hcd_pci_remove(struct pci_dev *dev); |
extern void usb_hcd_pci_shutdown(struct pci_dev *dev); |
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS |
extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); |
+#else |
+static inline int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev) |
+{ |
+ return 0; |
+} |
+#endif |
#ifdef CONFIG_PM |
extern const struct dev_pm_ops usb_hcd_pci_pm_ops; |
/branches/18.06.1/target/linux/generic/pending-4.9/821-usb-Remove-annoying-warning-about-bogus-URB.patch |
---|
@@ -0,0 +1,76 @@ |
From: Alexey Brodkin <abrodkin@synopsys.com> |
Subject: usb: Remove annoying warning about bogus URB |
When an ath9k-htc Wi-Fi dongle is used with a generic OHCI controller, |
an infinite stream of warnings appears in the debug console, like this: |
-------------------------->8---------------------- |
usb 1-1: new full-speed USB device number 2 using ohci-platform |
usb 1-1: ath9k_htc: Firmware ath9k_htc/htc_9271-1.4.0.fw requested |
usb 1-1: ath9k_htc: Transferred FW: ath9k_htc/htc_9271-1.4.0.fw, size: |
51008 |
------------[ cut here ]------------ |
WARNING: CPU: 0 PID: 19 at drivers/usb/core/urb.c:449 |
usb_submit_urb+0x1b4/0x498() |
usb 1-1: BOGUS urb xfer, pipe 1 != type 3 |
Modules linked in: |
CPU: 0 PID: 19 Comm: kworker/0:1 Not tainted |
4.4.0-rc4-00017-g00e2d79-dirty #3 |
Workqueue: events request_firmware_work_func |
Stack Trace: |
arc_unwind_core.constprop.1+0xa4/0x110 |
---[ end trace 649ef8c342817fc2 ]--- |
------------[ cut here ]------------ |
WARNING: CPU: 0 PID: 19 at drivers/usb/core/urb.c:449 |
usb_submit_urb+0x1b4/0x498() |
usb 1-1: BOGUS urb xfer, pipe 1 != type 3 |
Modules linked in: |
CPU: 0 PID: 19 Comm: kworker/0:1 Tainted: G W |
4.4.0-rc4-00017-g00e2d79-dirty #3 |
Workqueue: events request_firmware_work_func |
Stack Trace: |
arc_unwind_core.constprop.1+0xa4/0x110 |
---[ end trace 649ef8c342817fc3 ]--- |
------------[ cut here ]------------ |
-------------------------->8---------------------- |
There are some discussions on the mailing lists proposing to disable |
that particular check altogether, and magically everything seems to work |
fine with the warning muted. |
Anyway, a new thread on that subject can be found here: |
http://lists.infradead.org/pipermail/linux-snps-arc/2016-July/001310.html |
Let's see what comes out of that new discussion, hopefully patching |
of generic USB stuff won't be required then. |
Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com> |
--- |
drivers/usb/core/urb.c | 5 ----- |
1 file changed, 5 deletions(-) |
--- a/drivers/usb/core/urb.c |
+++ b/drivers/usb/core/urb.c |
@@ -321,9 +321,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); |
*/ |
int usb_submit_urb(struct urb *urb, gfp_t mem_flags) |
{ |
- static int pipetypes[4] = { |
- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT |
- }; |
int xfertype, max; |
struct usb_device *dev; |
struct usb_host_endpoint *ep; |
@@ -441,11 +438,6 @@ int usb_submit_urb(struct urb *urb, gfp_ |
* cause problems in HCDs if they get it wrong. |
*/ |
- /* Check that the pipe's type matches the endpoint's type */ |
- if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) |
- dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", |
- usb_pipetype(urb->pipe), pipetypes[xfertype]); |
- |
/* Check against a simple/standard policy */ |
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK | |
URB_FREE_BUFFER); |
/branches/18.06.1/target/linux/generic/pending-4.9/834-ledtrig-libata.patch |
---|
@@ -0,0 +1,149 @@ |
From: Daniel Golle <daniel@makrotopia.org> |
Subject: libata: add ledtrig support |
This adds a LED trigger for each ATA port indicating disk activity. |
As this is needed only on specific platforms (NAS SoCs and such), |
these platforms should define ARCH_WANTS_LIBATA_LEDS if there |
are boards with LED(s) intended to indicate ATA disk activity and |
need the OS to take care of that. |
That way, if not selected, LED trigger support will not be |
included in libata-core, and both codepaths and structures remain |
untouched. |
Signed-off-by: Daniel Golle <daniel@makrotopia.org> |
--- |
drivers/ata/Kconfig | 16 ++++++++++++++++ |
drivers/ata/libata-core.c | 41 +++++++++++++++++++++++++++++++++++++++++ |
include/linux/libata.h | 9 +++++++++ |
3 files changed, 66 insertions(+) |
--- a/drivers/ata/Kconfig |
+++ b/drivers/ata/Kconfig |
@@ -46,6 +46,22 @@ config ATA_VERBOSE_ERROR |
If unsure, say Y. |
+config ARCH_WANT_LIBATA_LEDS |
+ bool |
+ |
+config ATA_LEDS |
+ bool "support ATA port LED triggers" |
+ depends on ARCH_WANT_LIBATA_LEDS |
+ select NEW_LEDS |
+ select LEDS_CLASS |
+ select LEDS_TRIGGERS |
+ default y |
+ help |
+ This option adds a LED trigger for each registered ATA port. |
+ It is used to drive disk activity leds connected via GPIO. |
+ |
+ If unsure, say N. |
+ |
config ATA_ACPI |
bool "ATA ACPI Support" |
depends on ACPI |
--- a/drivers/ata/libata-core.c |
+++ b/drivers/ata/libata-core.c |
@@ -731,6 +731,19 @@ u64 ata_tf_read_block(const struct ata_t |
return block; |
} |
+#ifdef CONFIG_ATA_LEDS |
+#define LIBATA_BLINK_DELAY 20 /* ms */ |
+static inline void ata_led_act(struct ata_port *ap) |
+{ |
+ unsigned long led_delay = LIBATA_BLINK_DELAY; |
+ |
+ if (unlikely(!ap->ledtrig)) |
+ return; |
+ |
+ led_trigger_blink_oneshot(ap->ledtrig, &led_delay, &led_delay, 0); |
+} |
+#endif |
+ |
/** |
* ata_build_rw_tf - Build ATA taskfile for given read/write request |
* @tf: Target ATA taskfile |
@@ -4995,6 +5008,9 @@ struct ata_queued_cmd *ata_qc_new_init(s |
if (tag < 0) |
return NULL; |
} |
+#ifdef CONFIG_ATA_LEDS |
+ ata_led_act(ap); |
+#endif |
qc = __ata_qc_from_tag(ap, tag); |
qc->tag = tag; |
@@ -5896,6 +5912,9 @@ struct ata_port *ata_port_alloc(struct a |
ap->stats.unhandled_irq = 1; |
ap->stats.idle_irq = 1; |
#endif |
+#ifdef CONFIG_ATA_LEDS |
+ ap->ledtrig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); |
+#endif |
ata_sff_port_init(ap); |
return ap; |
@@ -5917,6 +5936,12 @@ static void ata_host_release(struct devi |
kfree(ap->pmp_link); |
kfree(ap->slave_link); |
+#ifdef CONFIG_ATA_LEDS |
+ if (ap->ledtrig) { |
+ led_trigger_unregister(ap->ledtrig); |
+ kfree(ap->ledtrig); |
+ }; |
+#endif |
kfree(ap); |
host->ports[i] = NULL; |
} |
@@ -6363,7 +6388,23 @@ int ata_host_register(struct ata_host *h |
host->ports[i]->print_id = atomic_inc_return(&ata_print_id); |
host->ports[i]->local_port_no = i + 1; |
} |
+#ifdef CONFIG_ATA_LEDS |
+ for (i = 0; i < host->n_ports; i++) { |
+ if (unlikely(!host->ports[i]->ledtrig)) |
+ continue; |
+ snprintf(host->ports[i]->ledtrig_name, |
+ sizeof(host->ports[i]->ledtrig_name), "ata%u", |
+ host->ports[i]->print_id); |
+ |
+ host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name; |
+ |
+ if (led_trigger_register(host->ports[i]->ledtrig)) { |
+ kfree(host->ports[i]->ledtrig); |
+ host->ports[i]->ledtrig = NULL; |
+ } |
+ } |
+#endif |
/* Create associated sysfs transport objects */ |
for (i = 0; i < host->n_ports; i++) { |
rc = ata_tport_add(host->dev,host->ports[i]); |
--- a/include/linux/libata.h |
+++ b/include/linux/libata.h |
@@ -38,6 +38,9 @@ |
#include <linux/acpi.h> |
#include <linux/cdrom.h> |
#include <linux/sched.h> |
+#ifdef CONFIG_ATA_LEDS |
+#include <linux/leds.h> |
+#endif |
/* |
* Define if arch has non-standard setup. This is a _PCI_ standard |
@@ -884,6 +887,12 @@ struct ata_port { |
#ifdef CONFIG_ATA_ACPI |
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ |
#endif |
+ |
+#ifdef CONFIG_ATA_LEDS |
+ struct led_trigger *ledtrig; |
+ char ledtrig_name[8]; |
+#endif |
+ |
/* owned by EH */ |
u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; |
}; |
/branches/18.06.1/target/linux/generic/pending-4.9/890-uart_optional_sysrq.patch |
---|
@@ -0,0 +1,40 @@ |
From: John Crispin <john@phrozen.org> |
Subject: serial: do not accept sysrq characters via serial port |
Many embedded boards have a disconnected TTL-level serial port which can |
pick up garbage that leads to spurious sysrq detections. |
[john@phrozen.org: sent upstream 22.12.2016] |
Signed-off-by: John Crispin <john@phrozen.org> |
Signed-off-by: Felix Fietkau <nbd@nbd.name> |
--- |
include/linux/serial_core.h | 2 +- |
lib/Kconfig.debug | 5 +++++ |
2 files changed, 6 insertions(+), 1 deletion(-) |
--- a/include/linux/serial_core.h |
+++ b/include/linux/serial_core.h |
@@ -456,7 +456,7 @@ extern void uart_handle_cts_change(struc |
extern void uart_insert_char(struct uart_port *port, unsigned int status, |
unsigned int overrun, unsigned int ch, unsigned int flag); |
-#ifdef SUPPORT_SYSRQ |
+#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL) |
static inline int |
uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) |
{ |
--- a/lib/Kconfig.debug |
+++ b/lib/Kconfig.debug |
@@ -410,6 +410,11 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE |
This may be set to 1 or 0 to enable or disable them all, or |
to a bitmask as described in Documentation/sysrq.txt. |
+config MAGIC_SYSRQ_SERIAL |
+ bool "Enable magic SysRq key over serial" |
+ depends on MAGIC_SYSRQ |
+ default y |
+ |
config DEBUG_KERNEL |
bool "Kernel debugging" |
help |
/branches/18.06.1/target/linux/generic/pending-4.9/920-mangle_bootargs.patch |
---|
@@ -0,0 +1,71 @@ |
From: Imre Kaloz <kaloz@openwrt.org> |
Subject: init: add CONFIG_MANGLE_BOOTARGS and disable it by default |
Enabling this option renames the bootloader-supplied root= |
and rootfstype= variables; their values might need to be known, but |
leaving them intact would break the automatic mechanisms OpenWrt uses. |
Signed-off-by: Imre Kaloz <kaloz@openwrt.org> |
--- |
init/Kconfig | 9 +++++++++ |
init/main.c | 24 ++++++++++++++++++++++++ |
2 files changed, 33 insertions(+) |
--- a/init/Kconfig |
+++ b/init/Kconfig |
@@ -1694,6 +1694,15 @@ config EMBEDDED |
an embedded system so certain expert options are available |
for configuration. |
+config MANGLE_BOOTARGS |
+ bool "Rename offending bootargs" |
+ depends on EXPERT |
+ help |
+ Sometimes the bootloader passed bogus root= and rootfstype= |
+ parameters to the kernel, and while you want to ignore them, |
+ you need to know the values f.e. to support dual firmware |
+ layouts on the flash. |
+ |
config HAVE_PERF_EVENTS |
bool |
help |
--- a/init/main.c |
+++ b/init/main.c |
@@ -353,6 +353,29 @@ static inline void setup_nr_cpu_ids(void |
static inline void smp_prepare_cpus(unsigned int maxcpus) { } |
#endif |
+#ifdef CONFIG_MANGLE_BOOTARGS |
+static void __init mangle_bootargs(char *command_line) |
+{ |
+ char *rootdev; |
+ char *rootfs; |
+ |
+ rootdev = strstr(command_line, "root=/dev/mtdblock"); |
+ |
+ if (rootdev) |
+ strncpy(rootdev, "mangled_rootblock=", 18); |
+ |
+ rootfs = strstr(command_line, "rootfstype"); |
+ |
+ if (rootfs) |
+ strncpy(rootfs, "mangled_fs", 10); |
+ |
+} |
+#else |
+static void __init mangle_bootargs(char *command_line) |
+{ |
+} |
+#endif |
+ |
/* |
* We need to store the untouched command line for future reference. |
* We also need to store the touched command line since the parameter |
@@ -506,6 +529,7 @@ asmlinkage __visible void __init start_k |
pr_notice("%s", linux_banner); |
setup_arch(&command_line); |
mm_init_cpumask(&init_mm); |
+ mangle_bootargs(command_line); |
setup_command_line(command_line); |
setup_nr_cpu_ids(); |
setup_per_cpu_areas(); |