--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2009-2025 OpenWrt.org
+
+include $(TOPDIR)/rules.mk
+
+ARCH:=mips
+BOARD:=econet
+BOARDNAME:=EcoNet EN75xx MIPS
+FEATURES:=dt source-only squashfs nand
+SUBTARGETS:=en751221
+
+KERNEL_PATCHVER:=6.12
+
+define Target/Description
+ Build firmware images for EcoNet EN75xx MIPS based boards.
+endef
+
+# include the profiles
+include $(INCLUDE_DIR)/target.mk
+
+$(eval $(call BuildTarget))
--- /dev/null
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/dts-v1/;
+
+/ {
+ compatible = "econet,en751221";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ hpt_clock: clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>; /* 200 MHz */
+ };
+
+ cpus: cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "mips,mips34Kc";
+ reg = <0>;
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ compatible = "mti,cpu-interrupt-controller";
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ spi_ctrl: spi_controller@1fa10000 {
+ compatible = "airoha,en7523-spi";
+ reg = <0x1fa10000 0x140>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ spi-rx-bus-width = <2>;
+ spi-tx-bus-width = <2>;
+
+ nand: nand@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ nand-ecc-engine = <&nand>;
+ };
+ };
+
+ intc: interrupt-controller@1fb40000 {
+ compatible = "econet,en751221-intc";
+ reg = <0x1fb40000 0x100>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ econet,shadow-interrupts = <7 2>, <8 3>, <13 12>, <30 29>;
+ };
+
+ uart: serial@1fbf0000 {
+ compatible = "ns16550";
+ reg = <0x1fbf0000 0x30>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ interrupt-parent = <&intc>;
+ interrupts = <0>;
+ /*
+ * Conversion of baud rate to clock frequency requires a
+ * computation that is not in the ns16550 driver, so this
+ * uart is fixed at 115200 baud.
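+ * (1843200 Hz = 115200 baud x 16, the standard 16x oversampling divisor.)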
+ */
+ clock-frequency = <1843200>;
+ };
+
+ timer_hpt: timer@1fbf0400 {
+ compatible = "econet,en751221-timer";
+ reg = <0x1fbf0400 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <30>;
+ clocks = <&hpt_clock>;
+ };
+};
--- /dev/null
+CONFIG_ARCH_32BIT_OFF_T=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_ARCH_MMAP_RND_BITS_MAX=15
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=15
+CONFIG_ASN1=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_CLKSRC_MMIO=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_CLZ_TAB=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_CPU_GENERIC_DUMP_TLB=y
+CONFIG_CPU_HAS_DIEI=y
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_CPU_HAS_RIXI=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_CPU_MIPS32=y
+# CONFIG_CPU_MIPS32_R1 is not set
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPSR2=y
+CONFIG_CPU_MITIGATIONS=y
+CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y
+CONFIG_CPU_R4K_CACHE_TLB=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_CPU_SUPPORTS_MSA=y
+CONFIG_CRC16=y
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=2
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_SIG=y
+CONFIG_CRYPTO_SIG2=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_ZBOOT=y
+CONFIG_DTB_ECONET_NONE=y
+# CONFIG_DTB_ECONET_SMARTFIBER_XP8421_B is not set
+CONFIG_DTC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_8250=y
+CONFIG_ECONET=y
+CONFIG_ECONET_EN751221_INTC=y
+CONFIG_ECONET_EN751221_TIMER=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_FS_IOMAP=y
+CONFIG_FUNCTION_ALIGNMENT=0
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_GENERIC_ATOMIC64=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_LIB_ASHLDI3=y
+CONFIG_GENERIC_LIB_ASHRDI3=y
+CONFIG_GENERIC_LIB_CMPDI2=y
+CONFIG_GENERIC_LIB_LSHRDI3=y
+CONFIG_GENERIC_LIB_UCMPDI2=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GPIO_CDEV=y
+CONFIG_HARDWARE_WATCHPOINTS=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HZ_PERIODIC=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_MIPS_CPU=y
+CONFIG_IRQ_WORK=y
+# CONFIG_JFFS2_FS is not set
+CONFIG_KEYS=y
+CONFIG_LIBCRC32C=m
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_MIGRATION=y
+CONFIG_MIPS=y
+CONFIG_MIPS_ASID_BITS=8
+CONFIG_MIPS_ASID_SHIFT=0
+# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set
+CONFIG_MIPS_CMDLINE_FROM_DTB=y
+CONFIG_MIPS_L1_CACHE_SHIFT=5
+# CONFIG_MIPS_NO_APPENDED_DTB is not set
+CONFIG_MIPS_NR_CPU_NR_MAP=2
+CONFIG_MIPS_RAW_APPENDED_DTB=y
+CONFIG_MIPS_SPRAM=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_MPILIB=y
+CONFIG_MTD_NAND_CORE=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_MTK_BMT=y
+CONFIG_MTD_SPI_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=13
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_NEED_SRCU_NMI_SAFE=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_XGRESS=y
+CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y
+CONFIG_NR_CPUS=2
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OID_REGISTRY=y
+CONFIG_PADATA=y
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PCI_DRIVERS_LEGACY=y
+CONFIG_PERF_USE_VMALLOC=y
+CONFIG_PGTABLE_LEVELS=2
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_PKCS7_TEST_KEY is not set
+# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_RANDSTRUCT_NONE=y
+CONFIG_RATIONAL=y
+CONFIG_RFS_ACCEL=y
+CONFIG_RPS=y
+# CONFIG_SECONDARY_TRUSTED_KEYRING is not set
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SGL_ALLOC=y
+CONFIG_SMP=y
+CONFIG_SMP_UP=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_SOC_ECONET_EN751221=y
+CONFIG_SPI=y
+CONFIG_SPI_AIROHA_EN7523=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_SYSTEM_DATA_VERIFICATION=y
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYS_HAS_CPU_MIPS32_R1=y
+CONFIG_SYS_HAS_CPU_MIPS32_R2=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_SYS_SUPPORTS_MIPS16=y
+CONFIG_SYS_SUPPORTS_SMP=y
+CONFIG_SYS_SUPPORTS_ZBOOT=y
+CONFIG_SYS_SUPPORTS_ZBOOT_UART16550=y
+CONFIG_TARGET_ISA_REV=2
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_AUTHENTICATION=y
+CONFIG_UBIFS_FS_SECURITY=y
+CONFIG_USE_GENERIC_EARLY_PRINTK_8250=y
+CONFIG_USE_OF=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_XPS=y
+CONFIG_XXHASH=y
+CONFIG_ZBOOT_LOAD_ADDRESS=0x80020000
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2016 OpenWrt.org
+
+# PACKAGES:= kmod-usb2 kmod-ath9k-htc wpad-basic-mbedtls
+
+define Profile/Default
+ NAME:=Default Profile (all drivers)
+endef
+
+define Profile/Default/Description
+ Default package set compatible with most boards.
+endef
+$(eval $(call Profile,Default))
--- /dev/null
+BOARDNAME:=en751221
+CPU_TYPE:=24kc
+KERNELNAME:=vmlinuz.bin
+
+define Target/Description
+ Build firmware images for EcoNet EN751221 family SoCs, including
+ EN7512, EN7513, EN7521, EN7526 and EN7586.
+endef
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Block Mapping & Bad Block Tables for EcoNet / Airoha EN75xx NAND.
+ *
+ * These SoCs use two tables, BBT and BMT. The BBT lists factory bad blocks and
+ * the BMT maps blocks that wore out over time. When a block is added to the BBT,
+ * everything following that block is shifted up by one, so adding a block to
+ * the BBT after the fact is a no-go.
+ *
+ * Blocks added to the BBT reduce the user-serviceable area directly, while
+ * blocks added to the BMT use a pool of reserve blocks that is above the user
+ * area. The BBT and BMT tables themselves are also stored at opposite ends of
+ * the reserve area.
+ *
+ * While the BBT can't be changed, it can be reconstructed. To do this, first
+ * the BMT must be reconstructed by scanning blocks in the reserve area to
+ * identify those whose OOB data contains a back-reference to the block they
+ * are mapped to. Once the BMT has been reconstructed, then we can scan all
+ * remaining blocks to identify any remaining which are marked bad, and
+ * consider them as (probably!) factory bad blocks.
+ *
+ * Reconstructing the BBT is not very safe because any confusion between a
+ * factory bad block and a worn out block will result in wrong offsets and in
+ * effect, data loss. Furthermore, bad blocks do not politely identify
+ * themselves; generally they error out when we try to read them. Still, we
+ * make the best effort we can by tagging worn blocks with 0x55 rather than
+ * 0x00 which is used to tag factory blocks. Vendor firmware does not do this,
+ * so if the bootloader or vendor OS marks a block bad, it will be
+ * indistinguishable from a factory bad block.
+ *
+ * The layout looks a little bit like this:
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |D D D D F D D D D D D D W D D D D D D D D F D D|B M M 0 0 0 0 0 T|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | User Area | Reserve Area |
+ *
+ * - D: Data block
+ * - F: Factory bad block
+ * - W: Worn block
+ * - B: BBT block
+ * - M: Mapped block (replacing a worn block)
+ * - 0: Free block
+ * - T: BMT table block
+ *
+ * In this example, there are 22 *usable* blocks in the user area (we include
+ * the worn blocks but exclude the factory bad blocks) so this disk will
+ * report 2816 KiB.
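+ * (That works out to 22 usable blocks x 128 KiB per erase block, the
+ * block size implied by this example.)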
+ *
+ * The bottom of the reserve area is decided by counting down from the
+ * end until we have REQUIRED_GOOD_BLOCKS non-bad blocks. So when blocks
+ * wear out in the reserve area, the bottom is moved down, stealing
+ * blocks from the end of the user area and making the disk "shrink".
+ * A side-effect of this is that if the user puts a valid BBT at the end of
+ * their space, there's a chance it might get picked up as THE BBT.
+ *
+ * Mercifully, blocks in the reserve area are never added to the BBT or
+ * BMT, so we don't have any mapping to do in the reserve area; we just
+ * have to check every block to see if it's bad before using it.
+ *
+ * Configuration:
+ *
+ * This driver is configured by device tree properties, the following
+ * properties are available:
+ *
+ * - econet,bmt;
+ * This boolean property enables the BMT; if it is not present on the
+ * MTD device, the BMT will not be enabled.
+ *
+ * - econet,enable-remap;
+ * This boolean property enables remapping of observed worn blocks. It can
+ * be placed either on the device or on a partition within a fixed partitions
+ * table. By default, this module will handle existing mappings but will not
+ * update them. If remapping is enabled, a block may be remapped even if it
+ * is only being read, and remapping carries a risk of lost data, so you
+ * should avoid enabling this on critical partitions like the bootloader, and
+ * you should also avoid enabling this on partitions like UBI which have
+ * their own remapping algorithms.
+ *
+ * - econet,assert-reserve-size = <u32>;
+ * This allows you to assert that the computed reserve size matches
+ * the bootloader. It is typical for bootloaders to log a message
+ * such as "bmt pool size: 163" on startup. The computed reserve size MUST
+ * match the bootloader's, otherwise it will be looking for the BBT in the
+ * wrong place. If a block in the reserve space wears out, the reserve
+ * size will be increased to account for it, so this property is not
+ * appropriate for production use. But when porting to a new board, it will
+ * ensure that this module exits early if its calculation does not match the
+ * platform.
+ *
+ * - econet,can-write-factory-bbt;
+ * This boolean property enables updating / rebuilding of the factory BBT.
+ * ANY CHANGE TO THE BBT IMPLIES DATA LOSS. If you enable this in conjunction
+ * with econet,factory-badblocks then it will set the BBT to the configured
+ * bad blocks. If you enable it alone, it will use the bootloader's logic:
+ * search for a BBT; if one cannot be found, then scan the disk for faulty
+ * blocks which have not been mapped as "worn blocks", mark them all as bad,
+ * and create a new BBT with their indexes. This applies to the whole disk
+ * and makes no effort to recover data; it might decide the bootloader is
+ * bad. You were warned.
+ *
+ * - econet,factory-badblocks = <u32 array>;
+ * This property allows you to specify the factory bad blocks.
+ * If econet,can-write-factory-bbt is unset, this is an assertion which will
+ * cause early exit if the observed BBT does not match the specified bad
+ * blocks. If econet,can-write-factory-bbt is set, this will overwrite the
+ * BBT with the specified bad blocks.
+ *
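+ * A minimal illustrative fragment (the node names, partition layout and
+ * numeric values below are hypothetical; only the econet,* properties are
+ * the ones documented above):
+ *
+ *   nand@0 {
+ *           econet,bmt;
+ *           econet,assert-reserve-size = <163>;
+ *
+ *           partitions {
+ *                   compatible = "fixed-partitions";
+ *                   #address-cells = <1>;
+ *                   #size-cells = <1>;
+ *
+ *                   partition@400000 {
+ *                           label = "cal-data";
+ *                           reg = <0x400000 0x100000>;
+ *                           econet,enable-remap;
+ *                   };
+ *           };
+ *   };
+ *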
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include "mtk_bmt.h"
+
+#define MAX_BMT_SIZE 256
+
+/* Size field is a u8 but vendor firmware checksums over all 1000 places. */
+#define MAX_BBT_SIZE 1000
+
+/* Vendor firmware calls this POOL_GOOD_BLOCK_PERCENT */
+#define REQUIRED_GOOD_BLOCKS(total_blocks) ((total_blocks) * 8 / 100)
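+/*
+ * For example, a hypothetical 1024-block device must keep at least
+ * 1024 * 8 / 100 = 81 good blocks in its reserve area.
+ */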
+
+/* This is ours but it is on-disk so we need consensus with ourselves. */
+#define BLOCK_WORN_MARK 0x55
+
+/* How hard to try; these must be at least one. */
+#define WRITE_TRIES 3
+#define ERASE_TRIES 3
+/*
+ * Number of blocks to try to write updated tables to before failing.
+ * Total write attempts will be WRITE_TRIES * UPDATE_TABLE_TRIES
+ */
+#define UPDATE_TABLE_TRIES 3
+
+/* This is a lot, but if this fails, we're probably losing data. */
+#define REMAP_COPY_TRIES 10
+
+#define INITIAL_READ_TRIES 3
+
+/* Max number of FBBs that can be expressed in the devicetree. */
+#define MAX_FACTORY_BAD_BLOCKS_OF 32
+
+const char *log_pfx = "en75_bmt";
+const char *name_can_write_factory_bbt = "econet,can-write-factory-bbt";
+const char *name_factory_badblocks = "econet,factory-badblocks";
+const char *name_enable_remap = "econet,enable-remap";
+const char *name_assert_reserve_size = "econet,assert-reserve-size";
+
+/* To promote readability, most functions must have their inputs passed in. */
+#define bmtd dont_directly_reference_mtk_bmtd
+
+/*
+ * On disk
+ */
+
+struct bmt_table_header {
+ /* "BMT" */
+ char signature[3];
+ /* 1 */
+ u8 version;
+ /* Unused */
+ u8 bad_count;
+ /* Number of mappings in table */
+ u8 size;
+ /* bmt_checksum() */
+ u8 checksum;
+ /* Unused */
+ u8 reserved[13];
+};
+static_assert(sizeof(struct bmt_table_header) == 20);
+
+struct bmt_entry {
+ /* The bad block which is being mapped */
+ u16 from;
+ /* The block in the reserve area that is being used */
+ u16 to;
+};
+
+struct bmt_table {
+ struct bmt_table_header header;
+ struct bmt_entry table[MAX_BMT_SIZE];
+};
+
+struct bbt_table_header {
+ /* "RAWB" */
+ char signature[4];
+ /* bbt_checksum() (only 16 bits used) */
+ u32 checksum;
+ /* 1 */
+ u8 version;
+ /* Number of bad blocks in table */
+ u8 size;
+ /* Unused (ffff) */
+ u8 reserved[2];
+};
+
+static_assert(sizeof(struct bbt_table_header) == 12);
+
+struct bbt_table {
+ struct bbt_table_header header;
+ /* This must be stored in ascending numerical order */
+ u16 table[MAX_BBT_SIZE];
+};
+
+static_assert(sizeof(struct bbt_table) == 2012);
+
+/*
+ * In memory
+ */
+
+/* Use this to differentiate between on-disk indexes and integers */
+struct block_index {
+ u16 index;
+};
+
+static_assert(sizeof(struct block_index) == 2);
+
+enum block_status {
+ BS_INVALID,
+ BS_AVAILABLE,
+ BS_BAD,
+ BS_MAPPED,
+ BS_BMT,
+ BS_BBT,
+ BS_NEED_ERASE,
+};
+
+struct block_info {
+ struct block_index index;
+ enum block_status status : 16;
+};
+
+static_assert(sizeof(struct block_info) == 4);
+
+struct block_range {
+ u16 begin;
+ u16 end;
+};
+
+/*
+ * Context and params
+ */
+
+struct en75_bmt_m {
+ struct bmt_desc *mtk;
+
+ /* In-memory copy of the BBT */
+ struct bbt_table bbt;
+
+ /* In-memory copy of the BMT */
+ struct bmt_table bmt;
+
+ /* Array of blocks in reserve area, size = reserve_block_count(ctx) */
+ struct block_info *rblocks;
+
+ /* BBT is on the first usable block after this. */
+ u16 reserve_area_begin;
+
+ /*
+ * Only allow remapping of blocks within the following ranges.
+ * Expressed as user block index, not mapped through BBT.
+ */
+ u16 can_remap_range_count;
+ struct block_range *can_remap_ranges;
+
+ /* Incremented each time the BBT is changed, reset when written */
+ s8 bbt_dirty;
+
+ /* Incremented each time the BMT is changed, reset when written */
+ s8 bmt_dirty;
+
+ /* Unless set, fail any attempt to write the BBT. */
+ s8 can_write_factory_bbt;
+};
+
+/*
+ * In-memory functions (do not read or write)
+ */
+
+static u16 bbt_checksum(const struct bbt_table *bbt)
+{
+ const u8 *data = (u8 *)bbt->table;
+ u16 checksum = bbt->header.version + bbt->header.size;
+
+ for (int i = 0; i < MAX_BBT_SIZE * sizeof(bbt->table[0]); i++)
+ checksum += data[i];
+
+ return checksum;
+}
+
+static u8 bmt_checksum(const struct bmt_table *bmt, int check_entries)
+{
+ int length;
+ const u8 *data;
+ u8 checksum;
+
+ WARN_ON_ONCE(check_entries > MAX_BMT_SIZE);
+ length = min(check_entries, MAX_BMT_SIZE) * sizeof(bmt->table[0]);
+ data = (u8 *)&bmt->table;
+ checksum = bmt->header.version + bmt->header.size;
+ for (int i = 0; i < length; i++)
+ checksum += data[i];
+
+ return checksum;
+}
+
+static int reserve_block_count(const struct en75_bmt_m *ctx)
+{
+ return ctx->mtk->total_blks - ctx->reserve_area_begin;
+}
+
+/* return a block_info or error pointer */
+static struct block_info *find_available_block(const struct en75_bmt_m *ctx, bool start_from_end)
+{
+ int limit = reserve_block_count(ctx);
+ int i = 0;
+ int d = 1;
+
+ /*
+ * rblocks is populated by scanning in reverse,
+ * so lowest block numbers are at the end.
+ */
+ if (!start_from_end) {
+ i = limit - 1;
+ d = -1;
+ }
+ for (; i < limit && i >= 0; i += d) {
+ if (ctx->rblocks[i].status == BS_AVAILABLE)
+ return &ctx->rblocks[i];
+ }
+ return ERR_PTR(-ENOSPC);
+}
+
+static int compare_bbt(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
+static void sort_bbt(struct bbt_table *bbt)
+{
+ sort(bbt->table, bbt->header.size, sizeof(bbt->table[0]),
+ compare_bbt, NULL);
+}
+
+/*
+ * When there's a factory bad block, we shift everything above it up by one.
+ * We sort the BBT so that we can do this in one pass. The vendor firmware
+ * also requires an ascending ordered BBT.
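+ *
+ * For example (hypothetical numbers): with a sorted BBT of {2, 5}, logical
+ * block 4 is shifted past physical block 2 (4 -> 5) and then past physical
+ * block 5 (5 -> 6), so it ends up at physical block 6.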
+ */
+static int get_mapping_block_bbt(const struct en75_bmt_m *ctx, int block)
+{
+ int size = ctx->bbt.header.size;
+
+ for (int i = 0; i < size; i++)
+ if (ctx->bbt.table[i] <= block)
+ block++;
+
+ if (block >= ctx->reserve_area_begin || block < 0)
+ return -EINVAL;
+
+ return block;
+}
+
+static int get_mapping_block(const struct en75_bmt_m *ctx, int block)
+{
+ block = get_mapping_block_bbt(ctx, block);
+
+ if (block < 0)
+ return block;
+
+ for (int i = ctx->bmt.header.size - 1; i >= 0; i--)
+ if (ctx->bmt.table[i].from == block)
+ return ctx->bmt.table[i].to;
+
+ return block;
+}
+
+static bool block_index_is_sane(const struct en75_bmt_m *ctx,
+ const struct block_index bi,
+ bool user_block)
+{
+ if (bi.index >= ctx->mtk->total_blks)
+ return false;
+ if (user_block && bi.index >= ctx->reserve_area_begin)
+ return false;
+ if (!user_block && bi.index < ctx->reserve_area_begin)
+ return false;
+ return true;
+}
+
+/*
+ * Write
+ */
+
+static int w_write(const struct en75_bmt_m *ctx,
+ const struct block_index bi,
+ bool user_area,
+ const char *name,
+ size_t len,
+ u8 *buf,
+ bool oob)
+{
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .ooboffs = 0,
+ .ooblen = (oob) ? len : 0,
+ .oobbuf = (oob) ? buf : NULL,
+ .len = (!oob) ? len : 0,
+ .datbuf = (!oob) ? buf : NULL,
+ };
+ int ret;
+
+ if (WARN_ON(!block_index_is_sane(ctx, bi, user_area)))
+ return -EINVAL;
+
+ for (int i = 0; i < WRITE_TRIES; i++) {
+ ret = ctx->mtk->_write_oob(
+ ctx->mtk->mtd,
+ ((loff_t)bi.index) << ctx->mtk->blk_shift,
+ &ops);
+ if (!ret)
+ break;
+ }
+ if (ret)
+ pr_warn("%s: error writing %s at %d\n",
+ log_pfx, name, bi.index);
+ return ret;
+}
+
+/*
+ * Erase & Mark bad
+ */
+
+static int w_mark_bad(
+ const struct en75_bmt_m *ctx,
+ const struct block_index bi,
+ bool user_area)
+{
+ u8 fdm[4] = {BLOCK_WORN_MARK, 0xff, 0xff, 0xff};
+
+ /*
+ * Don't erase first because it's damaged so erase is likely to fail.
+ * In any case we should only be clearing bits, so erase should be unnecessary.
+ */
+ return w_write(ctx, bi, user_area, "BAD_BLK",
+ sizeof(fdm), fdm, true);
+}
+
+static int w_erase_one(
+ const struct en75_bmt_m *ctx,
+ const struct block_index bi,
+ bool user_area)
+{
+ int ret = 0;
+
+ if (WARN_ON(!block_index_is_sane(ctx, bi, user_area)))
+ return -EINVAL;
+
+ for (int i = 0; i < ERASE_TRIES; i++) {
+ ret = bbt_nand_erase(bi.index);
+ if (!ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void w_erase_pending(const struct en75_bmt_m *ctx)
+{
+ int rblocks;
+
+ rblocks = reserve_block_count(ctx);
+ for (int i = 0; i < rblocks; i++) {
+ const struct block_info bif = ctx->rblocks[i];
+ int ret;
+
+ if (bif.status != BS_NEED_ERASE)
+ continue;
+
+ ret = w_erase_one(ctx, bif.index, false);
+ if (ret) {
+ if (WARN_ON_ONCE(ret == -EINVAL)) {
+ /* Just set status bad so we ignore it */
+ } else {
+ pr_info("%s: failed to erase block %d, marking bad\n",
+ log_pfx, bif.index.index);
+ w_mark_bad(ctx, bif.index, false);
+ }
+ ctx->rblocks[i].status = BS_BAD;
+ } else {
+ ctx->rblocks[i].status = BS_AVAILABLE;
+ }
+ }
+}
+
+static void mark_for_erasure(struct en75_bmt_m *ctx, enum block_status with_status)
+{
+ int rblocks = reserve_block_count(ctx);
+
+ for (int i = 0; i < rblocks; i++) {
+ if (ctx->rblocks[i].status == with_status)
+ ctx->rblocks[i].status = BS_NEED_ERASE;
+ }
+}
+
+/*
+ * Update tables
+ */
+
+/*
+ * Try on UPDATE_TABLE_TRIES blocks then give up
+ * Return a block_info or pointer error.
+ */
+static struct block_info *w_update_table(
+ const struct en75_bmt_m *ctx,
+ bool start_from_end,
+ const char *name,
+ size_t len,
+ u8 *buf)
+{
+ for (int i = 0; i < UPDATE_TABLE_TRIES; i++) {
+ struct block_info *bif =
+ find_available_block(ctx, start_from_end);
+ int ret;
+
+ if (IS_ERR(bif)) {
+ pr_err("%s: no space to store %s\n", log_pfx, name);
+ return bif;
+ }
+ ret = w_write(
+ ctx, bif->index, false, name,
+ len, buf, false);
+ if (ret) {
+ bif->status = BS_NEED_ERASE;
+ continue;
+ }
+ return bif;
+ }
+ return ERR_PTR(-EIO);
+}
+
+static int w_sync_tables(struct en75_bmt_m *ctx)
+{
+ w_erase_pending(ctx);
+
+ if (ctx->bmt_dirty) {
+ int dirty = ctx->bmt_dirty;
+ struct block_info *new_bmt_block;
+ int rblocks;
+
+ rblocks = reserve_block_count(ctx);
+ for (int i = ctx->bmt.header.size; i < rblocks; i++)
+ ctx->bmt.table[i] = (struct bmt_entry){ 0 };
+ ctx->bmt.header.checksum =
+ bmt_checksum(&ctx->bmt, ctx->bmt.header.size);
+ new_bmt_block = w_update_table(
+ ctx,
+ true,
+ "BMT",
+ sizeof(ctx->bmt),
+ (u8 *)&ctx->bmt);
+
+ /*
+ * If we can't write the BMT, we won't try to write the BBT.
+ * Without the BMT it is impossible to safely reconstruct.
+ */
+ if (IS_ERR(new_bmt_block)) {
+ pr_err("%s: error writing BMT to block\n", log_pfx);
+ return PTR_ERR(new_bmt_block);
+ }
+ pr_info("%s: BMT written to block %d\n",
+ log_pfx, new_bmt_block->index.index);
+ mark_for_erasure(ctx, BS_BMT);
+ new_bmt_block->status = BS_BMT;
+
+ ctx->bmt_dirty -= dirty;
+ WARN_ON(ctx->bmt_dirty);
+ }
+
+ if (ctx->bbt_dirty && !ctx->can_write_factory_bbt) {
+ WARN_ONCE(1, "%s: BUG: BBT requires update but %s is not set\n",
+ log_pfx, name_can_write_factory_bbt);
+ } else if (ctx->bbt_dirty) {
+ int dirty = ctx->bbt_dirty;
+ struct block_info *new_bbt_block;
+
+ for (int i = ctx->bbt.header.size; i < MAX_BBT_SIZE; i++)
+ ctx->bbt.table[i] = 0;
+ ctx->bbt.header.checksum = bbt_checksum(&ctx->bbt);
+ new_bbt_block = w_update_table(
+ ctx,
+ false,
+ "BBT",
+ sizeof(ctx->bbt),
+ (u8 *)&ctx->bbt);
+
+ if (IS_ERR(new_bbt_block)) {
+ pr_err("%s: error writing BBT to block\n", log_pfx);
+ return PTR_ERR(new_bbt_block);
+ }
+ pr_info("%s: BBT written to block %d\n",
+ log_pfx, new_bbt_block->index.index);
+ mark_for_erasure(ctx, BS_BBT);
+ new_bbt_block->status = BS_BBT;
+
+ ctx->bbt_dirty -= dirty;
+ WARN_ON(ctx->bbt_dirty);
+ }
+
+ w_erase_pending(ctx);
+ return 0;
+}
+
+/*
+ * Remap
+ */
+
+static int w_make_mapping(const struct en75_bmt_m *ctx,
+ const struct block_info replacement_block,
+ u16 bad_block_index)
+{
+ u8 fdm[4] = {0xff, 0xff, 0xff, 0xff};
+
+ if (WARN_ON_ONCE(bad_block_index >= ctx->reserve_area_begin))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(replacement_block.status != BS_AVAILABLE))
+ return -EINVAL;
+
+ /* vendor firmware uses host order */
+ memcpy(&fdm[2], &bad_block_index, sizeof(bad_block_index));
+ return w_write(
+ ctx,
+ replacement_block.index,
+ false,
+ "REMAP",
+ sizeof(fdm),
+ fdm,
+ true);
+}
+
+static int w_remap_block(struct en75_bmt_m *ctx,
+ u16 block,
+ struct block_info *maybe_mapped_block,
+ int copy_len)
+{
+ int bmt_size = ctx->bmt.header.size;
+ bool mapped_already_in_bmt = false;
+ int ret;
+ struct block_info *new_block;
+
+ if (ctx->bmt.header.size == 0xff) {
+ pr_err("%s: BMT full, cannot add more mappings\n", log_pfx);
+ return -ENOSPC;
+ }
+
+ new_block = find_available_block(ctx, false);
+ if (IS_ERR(new_block)) {
+ pr_err("%s: no space to remap block %d\n", log_pfx, block);
+ return -ENOSPC;
+ }
+
+ ret = w_make_mapping(ctx, *new_block, block);
+ if (ret) {
+ new_block->status = BS_NEED_ERASE;
+ return ret;
+ }
+
+ if (copy_len) {
+ int ret;
+ u16 copy_from = block;
+
+ if (maybe_mapped_block)
+ copy_from = maybe_mapped_block->index.index;
+
+ for (int i = 0; i < REMAP_COPY_TRIES; i++) {
+ ret = bbt_nand_copy(new_block->index.index,
+ copy_from, copy_len);
+ if (!ret)
+ break;
+ }
+ if (ret) {
+ /*
+ * We can either return an error or continue.
+ * If the user is reading, not returning an error means
+ * they're going to suddenly switch to reading another
+ * (empty) block and won't know it.
+ *
+ * If the user is erasing, returning an error means we're
+ * not doing our job. In any case, we ideally should be
+ * refusing to read newly remapped blocks until the user
+ * has issued an erase command, but we're using the mtk_bmt
+ * framework which does not support that, so we're going to
+ * have to continue.
+ *
+ * Good luck, user.
+ */
+ pr_err("%s: remap copy %d->%d failed LIKELY DATA LOSS!\n",
+ log_pfx, copy_from, new_block->index.index);
+ }
+ }
+
+ for (int i = 0; i < bmt_size; i++)
+ if (ctx->bmt.table[i].from == block) {
+ ctx->bmt.table[i].to = new_block->index.index;
+ mapped_already_in_bmt = true;
+ ctx->bmt_dirty++;
+ break;
+ }
+
+ if (!mapped_already_in_bmt) {
+ ctx->bmt.table[ctx->bmt.header.size++] = (struct bmt_entry) {
+ .from = block,
+ .to = new_block->index.index
+ };
+ ctx->bmt_dirty++;
+ }
+
+ new_block->status = BS_MAPPED;
+
+ /*
+ * Directly mark the user block bad (if possible). The mapped block we can
+ * try to reclaim: mark it as needing erase and see if the eraser decides it's bad.
+ */
+ w_mark_bad(ctx, (struct block_index) { .index = block }, true);
+ if (maybe_mapped_block)
+ maybe_mapped_block->status = BS_NEED_ERASE;
+
+ return 0;
+}
+
+enum unmap_erase {
+ UE_NO_ERASE,
+ UE_ATTEMPT_ERASE,
+ UE_REQUIRE_ERASE,
+};
+
+/*
+ * If no mapping in BMT, -EINVAL
+ * Attempt erase of block if requested
+ * Remove mapping from BMT
+ * Set mapped block to needs erase
+ */
+static int w_unmap_block(struct en75_bmt_m *ctx,
+ u16 block,
+ enum unmap_erase erase)
+{
+ int bmt_size = ctx->bmt.header.size;
+ struct block_info *mapped_block = NULL;
+ int bmt_index = 0;
+
+ for (; bmt_index < bmt_size; bmt_index++) {
+ int rblocks;
+
+ rblocks = reserve_block_count(ctx);
+ if (ctx->bmt.table[bmt_index].from != block)
+ continue;
+
+ for (int j = 0; j < rblocks; j++) {
+ if (ctx->rblocks[j].index.index == ctx->bmt.table[bmt_index].to) {
+ mapped_block = &ctx->rblocks[j];
+ break;
+ }
+ }
+ break;
+ }
+ if (!mapped_block) {
+ pr_err("%s: block %d not mapped\n", log_pfx, block);
+ return -EINVAL;
+ }
+ WARN_ON_ONCE(mapped_block->status != BS_MAPPED);
+ if (erase > UE_NO_ERASE) {
+ int ret;
+
+ ret = w_erase_one(ctx, mapped_block->index, false);
+ if (ret) {
+ if (erase == UE_REQUIRE_ERASE) {
+ pr_err("%s: unmap block %d: erase failed\n",
+ log_pfx, block);
+ return ret;
+ }
+ pr_warn("%s: unmap block %d: erase failed\n",
+ log_pfx, block);
+ }
+ }
+
+ ctx->bmt.table[bmt_index] = ctx->bmt.table[--bmt_size];
+ ctx->bmt.table[bmt_size] = (struct bmt_entry) { 0 };
+ ctx->bmt.header.size = bmt_size;
+ ctx->bmt_dirty++;
+
+ mapped_block->status = BS_NEED_ERASE;
+ return 0;
+}
+
+/*
+ * Init functions
+ */
+
+enum block_is_bad {
+ /* Good block */
+ BB_GOOD,
+ /* Probably marked bad at the factory (or by vendor firmware!) */
+ BB_FACTORY_BAD,
+ /* Marked bad by us */
+ BB_WORN,
+ /* We don't know */
+ BB_UNKNOWN_BAD,
+};
+
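+/*
+ * FDM (OOB free area) byte layout as used by this driver: bytes 0-1 are the
+ * bad block marker (0xff 0xff = good, 0x00 in either byte = factory bad,
+ * BLOCK_WORN_MARK = marked worn by us); bytes 2-3 hold a host-order
+ * back-reference to the user block that a reserve block replaces
+ * (0xffff = unmapped).
+ */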
+static enum block_is_bad fdm_is_bad(u8 fdm[static 4])
+{
+ if (fdm[0] == 0xff && fdm[1] == 0xff)
+ return BB_GOOD;
+ if (fdm[0] == BLOCK_WORN_MARK)
+ return BB_WORN;
+ if (fdm[0] == 0x00 || fdm[1] == 0x00)
+ return BB_FACTORY_BAD;
+ return BB_UNKNOWN_BAD;
+}
+
+static bool fdm_is_mapped(u8 fdm[static 4])
+{
+ if (fdm[0] != 0xff || fdm[1] != 0xff)
+ return false;
+ return fdm[2] != 0xff || fdm[3] != 0xff;
+}
+
+static void r_reconstruct_bmt(struct en75_bmt_m *ctx)
+{
+ int reserve_area_begin = ctx->reserve_area_begin;
+
+ memset(&ctx->bmt, 0xff, sizeof(ctx->bmt.header));
+ /*
+ * The table must be zero because the vendor firmware checksums
+ * over a number of entries equal to the number of blocks in the
+ * reserve area (note that when a block in the reserve area fails,
+ * this number will increase on next boot!). But we and the vendor
+ * firmware both store the entire table, and zeroed entries do not
+ * affect the checksum.
+ */
+ memset(&ctx->bmt, 0x00, sizeof(ctx->bmt.table));
+ memcpy(&ctx->bmt.header.signature, "BMT", 3);
+ ctx->bmt.header.version = 1;
+ ctx->bmt.header.size = 0;
+ ctx->bmt_dirty++;
+
+ for (int i = ctx->mtk->total_blks - 1; i >= reserve_area_begin; i--) {
+ unsigned short mapped_block;
+ u8 fdm[4];
+ int ret;
+
+ ret = bbt_nand_read(blk_pg(i),
+ ctx->mtk->data_buf, ctx->mtk->pg_size,
+ fdm, sizeof(fdm));
+ if (ret < 0 || fdm_is_bad(fdm))
+ continue;
+
+ /* Vendor firmware uses host order. */
+ memcpy(&mapped_block, &fdm[2], 2);
+ if (mapped_block >= reserve_area_begin)
+ continue;
+ pr_info("%s: Found mapping %d->%d\n", log_pfx, mapped_block, i);
+ ctx->bmt.table[ctx->bmt.header.size++] = (struct bmt_entry) {
+ .from = mapped_block,
+ .to = i
+ };
+ }
+}
+
+static int r_reconstruct_bbt(struct bbt_table *bbt_out, const struct en75_bmt_m *ctx)
+{
+ int reserve_area_begin = ctx->reserve_area_begin;
+ int bmt_size = ctx->bmt.header.size;
+
+ /* Need the BMT to exist in order to reconstruct the BBT. */
+ if (WARN_ON_ONCE(!ctx->bmt.header.version))
+ return -EINVAL;
+
+ memset(bbt_out, 0xff, sizeof(bbt_out->header));
+ /* Vendor firmware checksums the entire table, no matter how much is used. */
+ memset(bbt_out->table, 0x00, sizeof(bbt_out->table));
+ memcpy(bbt_out->header.signature, "RAWB", 4);
+ bbt_out->header.version = 1;
+ bbt_out->header.size = 0;
+
+ for (int i = 0; i < reserve_area_begin; i++) {
+ bool is_mapped = false;
+ int ret;
+ u8 fdm[4];
+
+ for (int j = 0; j < bmt_size; j++)
+ if (ctx->bmt.table[j].from == i) {
+ is_mapped = true;
+ break;
+ }
+
+ if (is_mapped)
+ continue;
+
+ ret = bbt_nand_read(blk_pg(i),
+ ctx->mtk->data_buf, ctx->mtk->pg_size,
+ fdm, sizeof(fdm));
+ if (!ret) {
+ enum block_is_bad status = fdm_is_bad(fdm);
+
+ if (status == BB_GOOD || status == BB_WORN)
+ continue;
+ }
+
+ pr_info("%s: Found factory bad block %d\n", log_pfx, i);
+ bbt_out->table[bbt_out->header.size++] = (u16)i;
+ }
+ sort_bbt(bbt_out);
+ return 0;
+}
+
+static bool block_is_erased(u8 *data, u32 datalen, u8 *oob, u32 ooblen)
+{
+ for (int i = 0; i < datalen; i++)
+ if (data[i] != 0xff)
+ return false;
+ for (int i = 0; i < ooblen; i++)
+ if (oob[i] != 0xff)
+ return false;
+ return true;
+}
+
+static int try_parse_bbt(struct bbt_table *out, u8 *buf, int len)
+{
+ static struct bbt_table workspace;
+
+ if (len < sizeof(*out))
+ return -EINVAL;
+
+ memcpy(&workspace, buf, sizeof(workspace));
+
+ if (strncmp(workspace.header.signature, "RAWB", 4))
+ return -EINVAL;
+
+ if (workspace.header.checksum != bbt_checksum(&workspace))
+ return -EINVAL;
+
+ sort_bbt(&workspace);
+
+ memcpy(out, &workspace, sizeof(workspace));
+ return 0;
+}
+
+static int try_parse_bmt(struct bmt_table *out, u8 *buf, int len)
+{
+ static struct bmt_table workspace;
+
+ if (len < sizeof(*out))
+ return -EINVAL;
+
+ memcpy(&workspace, buf, sizeof(workspace));
+
+ if (strncmp(workspace.header.signature, "BMT", 3))
+ return -EINVAL;
+
+ /*
+ * The vendor firmware checksums over rblocks entries, but zero
+ * values do not affect the checksum so this works.
+ * We don't know rblocks while we're scanning and in any case
+ * it's a moving target: if a block fails in the reserve area,
+ * rblocks will increase by one. So we use the size from the
+ * header and if the vendor firmware left some trash in the
+ * buffer after the last entry, we're going to have an invalid
+ * checksum.
+ */
+ if (workspace.header.checksum !=
+ bmt_checksum(&workspace, workspace.header.size))
+ return -EINVAL;
+
+ memcpy(out, &workspace, sizeof(workspace));
+ return 0;
+}
+
+static int r_scan_reserve(struct en75_bmt_m *ctx)
+{
+ u16 total_blks = ctx->mtk->total_blks;
+ int cursor = total_blks - 1;
+ int good_blocks = 0;
+ int rblock = 0;
+ int rblocks_available = 0;
+ u8 fdm[4];
+
+ for (; cursor > 0; cursor--) {
+ int ret;
+ u8 *data_buf = ctx->mtk->data_buf;
+ u32 pg_size = ctx->mtk->pg_size;
+
+ if (rblock >= rblocks_available) {
+ rblocks_available += cursor;
+ ctx->rblocks = krealloc(
+ ctx->rblocks,
+ rblocks_available * sizeof(*ctx->rblocks),
+ GFP_KERNEL);
+ if (!ctx->rblocks)
+ return -ENOMEM;
+ }
+
+ for (int i = 0; i < INITIAL_READ_TRIES; i++) {
+ ret = bbt_nand_read(blk_pg(cursor),
+ data_buf,
+ pg_size,
+ fdm, sizeof(fdm));
+ if (!ret)
+ break;
+ }
+ struct block_info bif = {
+ .index = { .index = cursor },
+ .status = BS_INVALID
+ };
+
+ if (ret || fdm_is_bad(fdm)) {
+ pr_info("%s: skipping bad block %d in reserve area\n", log_pfx, cursor);
+ bif.status = BS_BAD;
+ } else if (fdm_is_mapped(fdm)) {
+ pr_debug("%s: found mapped block %d\n", log_pfx, cursor);
+ bif.status = BS_MAPPED;
+ } else if (!try_parse_bbt(&ctx->bbt, data_buf, pg_size)) {
+ pr_info("%s: found BBT in block %d\n", log_pfx, cursor);
+ bif.status = BS_BBT;
+ } else if (!try_parse_bmt(&ctx->bmt, data_buf, pg_size)) {
+ pr_info("%s: found BMT in block %d\n", log_pfx, cursor);
+ bif.status = BS_BMT;
+ } else if (block_is_erased(data_buf, pg_size, fdm, sizeof(fdm))) {
+ pr_debug("%s: found available block %d\n", log_pfx, cursor);
+ bif.status = BS_AVAILABLE;
+ } else {
+ pr_debug("%s: found block needing erase %d\n", log_pfx, cursor);
+ bif.status = BS_NEED_ERASE;
+ }
+
+ ctx->rblocks[rblock++] = bif;
+ good_blocks += (bif.status != BS_BAD);
+
+ if (good_blocks >= REQUIRED_GOOD_BLOCKS(total_blks))
+ break;
+ }
+ if (!cursor) {
+ pr_err("%s: not enough valid blocks found, need %d got %d\n",
+ log_pfx, REQUIRED_GOOD_BLOCKS(total_blks), good_blocks);
+ return -ENOSPC;
+ }
+ ctx->reserve_area_begin = cursor;
+ return 0;
+}
+
+static int w_factory_badblocks(struct en75_bmt_m *ctx, const u32 *blocks, int count)
+{
+ if (WARN_ON_ONCE(!ctx->bbt.header.version))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!ctx->bmt.header.version))
+ return -EINVAL;
+
+ for (int i = 0; i < count; i++) {
+ if (blocks[i] >= ctx->reserve_area_begin) {
+ pr_err("%s: factory bad block %d not in user area\n",
+ log_pfx, blocks[i]);
+ return -EINVAL;
+ }
+ }
+
+ if (count > MAX_BBT_SIZE) {
+ pr_err("%s: Can't set %d factory bad blocks, limit is %d\n",
+ log_pfx, count, MAX_BBT_SIZE);
+ return -ENOSPC;
+ }
+
+ /* If ctx->can_write_factory_bbt is not set, this is just an assertion. */
+ if (!ctx->can_write_factory_bbt) {
+ if (ctx->bbt.header.size != count) {
+ pr_err("%s: factory bad block count mismatch %d != %d\n",
+ log_pfx, ctx->bbt.header.size, count);
+ return -EIO;
+ }
+ for (int i = 0; i < count; i++)
+ for (int j = 0; j < ctx->bbt.header.size; j++) {
+ if (ctx->bbt.table[j] == blocks[i])
+ break;
+ if (j == ctx->bbt.header.size - 1) {
+ pr_err("%s: factory bad block %d not in BBT\n",
+ log_pfx, blocks[i]);
+ return -EIO;
+ }
+ }
+ return 0;
+ }
+
+ /*
+ * Clear the BBT, and un-bad the blocks that will not be added back.
+ * We have to clear it because if we're adding a block, we can't
+ * properly check if it's mapped in the BMT unless it's removed from
+ * the BBT.
+ */
+ for (int i = ctx->bbt.header.size - 1; i >= 0; i--) {
+ /* Capture the entry now; the swap below overwrites table[i]. */
+ u16 removed = ctx->bbt.table[i];
+ int j = 0;
+
+ for (; j < count; j++)
+ if (removed == blocks[j])
+ break;
+
+ ctx->bbt.header.size--;
+ ctx->bbt.table[i] = ctx->bbt.table[ctx->bbt.header.size];
+ ctx->bbt.table[ctx->bbt.header.size] = 0;
+
+ /* It's going to be added back so let it stay bad. */
+ if (j < count)
+ continue;
+
+ /*
+ * Try to erase the bad marker, if it's impossible to erase
+ * then the BMT is going to catch it and map it out later.
+ * If you're running w_factory_badblocks(), then you're
+ * changing offsets so data loss is already guaranteed.
+ */
+ w_erase_one(ctx,
+ (struct block_index) { .index = removed },
+ true);
+ }
+
+ /* Unmap any block that is mapped in the BMT */
+ for (int i = 0; i < count; i++) {
+ int mapped_block;
+ int ret;
+
+ mapped_block = get_mapping_block(ctx, blocks[i]);
+ if (mapped_block < ctx->reserve_area_begin) {
+ pr_info("%s: factory bad block %d not mapped, no unmap needed\n",
+ log_pfx, blocks[i]);
+ continue; /* not mapped */
+ }
+
+ pr_info("%s: unmapping block %d to set as factory bad block\n",
+ log_pfx, blocks[i]);
+ ret = w_unmap_block(ctx, blocks[i], UE_NO_ERASE);
+ if (ret) {
+ pr_warn("%s: failed unmapping %d to set as factory bad block\n",
+ log_pfx, blocks[i]);
+ }
+ }
+
+ /* Add everything in the list to the BBT */
+ for (int i = 0; i < count; i++) {
+ int j = 0;
+
+ for (; j < ctx->bbt.header.size; j++)
+ if (ctx->bbt.table[j] == blocks[i])
+ break;
+
+ if (j < ctx->bbt.header.size)
+ continue;
+
+ ctx->bbt.table[ctx->bbt.header.size++] = blocks[i];
+
+ w_mark_bad(ctx, (struct block_index) { .index = blocks[i] }, true);
+ }
+
+ sort_bbt(&ctx->bbt);
+ ctx->bbt_dirty++;
+
+ return 0;
+}
+
+static int add_remap_range(struct en75_bmt_m *ctx, u16 begin_block, u16 size_blocks)
+{
+ if (((int)begin_block) + size_blocks > ctx->reserve_area_begin) {
+ pr_err("%s: remap range %d->%d exceeds user area\n",
+ log_pfx, begin_block, size_blocks);
+ return -EINVAL;
+ }
+ ctx->can_remap_ranges = krealloc(
+ ctx->can_remap_ranges,
+ (ctx->can_remap_range_count + 1) * sizeof(*ctx->can_remap_ranges),
+ GFP_KERNEL);
+ if (!ctx->can_remap_ranges)
+ return -ENOMEM;
+ ctx->can_remap_ranges[ctx->can_remap_range_count++] = (struct block_range) {
+ .begin = begin_block,
+ .end = begin_block + size_blocks
+ };
+ return 0;
+}
+
+static int w_init(struct en75_bmt_m *ctx, struct device_node *np)
+{
+ u32 factory_badblocks[MAX_FACTORY_BAD_BLOCKS_OF];
+ int factory_badblocks_count = -1;
+ int assert_reserve_size = -1;
+ int ret;
+
+ ret = r_scan_reserve(ctx);
+ if (ret)
+ return ret;
+
+ if (!of_property_read_u32(np, name_assert_reserve_size, &assert_reserve_size)) {
+ if (assert_reserve_size != reserve_block_count(ctx)) {
+ pr_err("%s: reserve area size mismatch %d != %d\n",
+ log_pfx, assert_reserve_size, reserve_block_count(ctx));
+ return -EINVAL;
+ }
+ }
+
+ if (of_property_read_bool(np, name_can_write_factory_bbt))
+ ctx->can_write_factory_bbt = 1;
+
+ ret = of_property_read_variable_u32_array(np, name_factory_badblocks,
+ factory_badblocks,
+ 0, MAX_FACTORY_BAD_BLOCKS_OF);
+ if (ret >= 0)
+ factory_badblocks_count = ret;
+
+ if (of_property_read_bool(np, name_enable_remap)) {
+ add_remap_range(ctx, 0, ctx->reserve_area_begin);
+ } else {
+ struct device_node *parts_np;
+ struct device_node *part_np;
+
+ parts_np = of_get_child_by_name(np, "partitions");
+ for_each_child_of_node(parts_np, part_np) {
+ u32 start;
+ u32 size;
+ const __be32 *reg;
+ int ret;
+
+ if (!of_property_read_bool(part_np, name_enable_remap))
+ continue;
+
+ reg = of_get_property(part_np, "reg", NULL);
+ if (!reg) {
+ pr_warn("%s: can't enable-remap on %pOF, no reg property\n",
+ log_pfx, part_np);
+ continue;
+ }
+
+ start = be32_to_cpup(reg);
+ if (start & ((1 << ctx->mtk->blk_shift) - 1)) {
+ pr_warn("%s: can't enable-remap on %pOF start not aligned\n",
+ log_pfx, part_np);
+ continue;
+ }
+ size = be32_to_cpup(&reg[1]);
+ if (size & ((1 << ctx->mtk->blk_shift) - 1)) {
+ pr_warn("%s: can't enable-remap on %pOF size not aligned\n",
+ log_pfx, part_np);
+ continue;
+ }
+ ret = add_remap_range(ctx,
+ start >> ctx->mtk->blk_shift,
+ size >> ctx->mtk->blk_shift);
+ if (ret)
+ pr_warn("%s: failed enable-remap on %pOF: %d\n",
+ log_pfx, part_np, ret);
+ else
+ pr_info("%s: enable-remap set for %pOF\n",
+ log_pfx, part_np);
+ }
+ if (parts_np)
+ of_node_put(parts_np);
+ }
+
+ if (ctx->bbt.header.version) {
+ if (!ctx->bmt.header.version) {
+ pr_info("%s: BBT found, BMT missing or corrupted\n", log_pfx);
+ r_reconstruct_bmt(ctx);
+ } else {
+ pr_info("%s: BBT & BMT found\n", log_pfx);
+ }
+ } else if (!ctx->can_write_factory_bbt) {
+ pr_err("%s: BBT not found and %s is unset, giving up\n",
+ log_pfx, name_can_write_factory_bbt);
+ return -EIO;
+ } else if (factory_badblocks_count > -1) {
+ pr_info("%s: BBT not found, reconstructing from %s\n",
+ log_pfx, name_factory_badblocks);
+ if (!ctx->bmt.header.version)
+ r_reconstruct_bmt(ctx);
+ } else if (ctx->bmt.header.version) {
+ pr_info("%s: BBT not found, BMT found, attempting reconstruction\n",
+ log_pfx);
+ ret = r_reconstruct_bbt(&ctx->bbt, ctx);
+ if (ret)
+ return ret;
+ } else {
+ pr_warn("%s: No BBT or BMT found, attempting reconstruction, LIKELY DATA LOSS!\n",
+ log_pfx);
+ r_reconstruct_bmt(ctx);
+ ret = r_reconstruct_bbt(&ctx->bbt, ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (factory_badblocks_count > -1) {
+ int ret = w_factory_badblocks(ctx,
+ factory_badblocks,
+ factory_badblocks_count);
+ if (ret)
+ return ret;
+ }
+
+ pr_info("%s: blocks: total: %d, user: %d, factory_bad: %d, worn: %d reserve: %d\n",
+ log_pfx,
+ ctx->mtk->total_blks,
+ ctx->reserve_area_begin - ctx->bbt.header.size,
+ ctx->bbt.header.size,
+ ctx->bmt.header.size,
+ reserve_block_count(ctx)
+ );
+
+ for (int i = 0; i < ctx->bbt.header.size; i++)
+ pr_info(" - BBT factory bad block: %d\n", ctx->bbt.table[i]);
+ for (int i = 0; i < ctx->bmt.header.size; i++)
+ pr_info(" - BMT mapped worn block: %d->%d\n",
+ ctx->bmt.table[i].from, ctx->bmt.table[i].to);
+
+ ctx->mtk->mtd->size =
+ (ctx->reserve_area_begin - ctx->bbt.header.size) << ctx->mtk->blk_shift;
+ pr_info("%s: %u MiB usable space\n", log_pfx, (u32)ctx->mtk->mtd->size >> 20);
+
+ return 0;
+}
+
+/*
+ * Public functions (only these have direct access to the context)
+ */
+
+static struct en75_bmt_m en75_bmt_m;
+
+static int pub_init(struct device_node *np)
+{
+ int ret;
+
+ ret = w_init(&en75_bmt_m, np);
+ if (!ret)
+ w_sync_tables(&en75_bmt_m);
+ return ret;
+}
+
+/*
+ * If we return true, mtk_bmt will retry the operation and if it continues
+ * to fail, it will call us back 9 more times. If we return false, the user
+ * gets an error immediately.
+ *
+ * mtk_bmt might be calling us because:
+ * 1. user tried to read and it failed
+ * 2. user tried to read and there was a "concerning" amount of bit errors
+ * in this case, the user does not get an error if we return false.
+ * 3. user tried to write or erase and it failed
+ *
+ */
+static bool pub_remap_block(
+ u16 user_block,
+ u16 mapped_block_idx,
+ int copy_len)
+{
+ int block;
+ struct block_info *maybe_mapped_block = NULL;
+ int ret;
+
+ for (int i = 0; i < en75_bmt_m.can_remap_range_count; i++) {
+ if (user_block >= en75_bmt_m.can_remap_ranges[i].begin &&
+ user_block < en75_bmt_m.can_remap_ranges[i].end)
+ goto in_range;
+ }
+ return false;
+
+in_range:
+
+ block = get_mapping_block_bbt(&en75_bmt_m, user_block);
+ if (block < 0 || user_block >= en75_bmt_m.reserve_area_begin) {
+ pr_info("%s: remap: block %d out of range\n",
+ log_pfx, user_block);
+ return false;
+ }
+
+ if (mapped_block_idx != block) {
+ int rblocks = reserve_block_count(&en75_bmt_m);
+
+ for (int i = 0; i < rblocks; i++)
+ if (en75_bmt_m.rblocks[i].index.index == mapped_block_idx) {
+ maybe_mapped_block = &en75_bmt_m.rblocks[i];
+ break;
+ }
+ if (WARN_ON_ONCE(!maybe_mapped_block))
+ return false;
+ }
+
+ ret = w_remap_block(&en75_bmt_m, block, maybe_mapped_block, copy_len);
+ w_sync_tables(&en75_bmt_m);
+ if (ret == -ENOSPC)
+ return false;
+
+ return true;
+}
+
+static void pub_unmap_block(u16 user_block)
+{
+ int block;
+
+ block = get_mapping_block_bbt(&en75_bmt_m, user_block);
+ if (block < 0 || user_block >= en75_bmt_m.reserve_area_begin) {
+ pr_info("%s: unmap: block %d out of range\n",
+ log_pfx, user_block);
+ return;
+ }
+ w_unmap_block(&en75_bmt_m, block, UE_REQUIRE_ERASE);
+ w_sync_tables(&en75_bmt_m);
+}
+
+static int pub_debug(void *data, u64 val)
+{
+ return 0;
+}
+
+static int pub_get_mapping_block(int user_block)
+{
+ return get_mapping_block(&en75_bmt_m, user_block);
+}
+
+#undef bmtd
+static struct en75_bmt_m en75_bmt_m = {
+ .mtk = &bmtd,
+};
+
+const struct mtk_bmt_ops en75_bmt_ops = {
+ .init = pub_init,
+ .remap_block = pub_remap_block,
+ .unmap_block = pub_unmap_block,
+ .get_mapping_block = pub_get_mapping_block,
+ .debug = pub_debug,
+};
--- /dev/null
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/image.mk
+
+define Target/Description
+ Build firmware images for EcoNet MIPS based boards.
+endef
+
+# Devices will come in a later commit.
+
+$(eval $(call BuildImage))
--- /dev/null
+From 9773c540441c6ae15aefb49e67142e94369dbbc0 Mon Sep 17 00:00:00 2001
+Date: Sun, 30 Mar 2025 17:02:58 +0000
+Subject: [PATCH] dt-bindings: interrupt-controller: Add EcoNet EN751221 INTC
+
+Document the device tree binding for the interrupt controller in the
+EcoNet EN751221 MIPS SoC.
+
+---
+ .../econet,en751221-intc.yaml | 78 +++++++++++++++++++
+ 1 file changed, 78 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml
+@@ -0,0 +1,78 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/interrupt-controller/econet,en751221-intc.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: EcoNet EN751221 Interrupt Controller
++
++maintainers:
++
++description:
++ The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
++ designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
++ be routed to either VPE but not both, so to support per-CPU interrupts, a
++ secondary IRQ number is allocated to control masking/unmasking on VPE#1. For
++ lack of a better term we call these "shadow interrupts". The assignment of
++ shadow interrupts is defined by the SoC integrator when wiring the interrupt
++ lines, so they are configurable in the device tree.
++
++allOf:
++ - $ref: /schemas/interrupt-controller.yaml#
++
++properties:
++ compatible:
++ const: econet,en751221-intc
++
++ reg:
++ maxItems: 1
++
++ "#interrupt-cells":
++ const: 1
++
++ interrupt-controller: true
++
++ interrupts:
++ maxItems: 1
++ description: Interrupt line connecting this controller to its parent.
++
++ econet,shadow-interrupts:
++ $ref: /schemas/types.yaml#/definitions/uint32-matrix
++ description:
++ An array of interrupt number pairs where each pair represents a shadow
++ interrupt relationship. The first number in each pair is the primary IRQ,
++ and the second is its shadow IRQ used for VPE#1 control. For example,
++ <8 3> means IRQ 8 is shadowed by IRQ 3, so IRQ 3 cannot be mapped, but
++ when VPE#1 requests IRQ 8, it will manipulate the IRQ 3 mask bit.
++ minItems: 1
++ maxItems: 20
++ items:
++ items:
++ - description: primary per-CPU IRQ
++ - description: shadow IRQ number
++
++required:
++ - compatible
++ - reg
++ - interrupt-controller
++ - "#interrupt-cells"
++ - interrupts
++
++additionalProperties: false
++
++examples:
++ - |
++ interrupt-controller@1fb40000 {
++ compatible = "econet,en751221-intc";
++ reg = <0x1fb40000 0x100>;
++
++ interrupt-controller;
++ #interrupt-cells = <1>;
++
++ interrupt-parent = <&cpuintc>;
++ interrupts = <2>;
++
++ econet,shadow-interrupts = <7 2>, <8 3>, <13 12>, <30 29>;
++ };
++...
--- /dev/null
+From 1902a59cf5f9d8b99ecf0cb8f122cb00ef7a3f13 Mon Sep 17 00:00:00 2001
+Date: Sun, 30 Mar 2025 17:02:59 +0000
+Subject: [PATCH] irqchip: Add EcoNet EN751221 INTC
+
+Add a driver for the interrupt controller in the EcoNet EN751221 MIPS SoC.
+
+---
+ drivers/irqchip/Kconfig | 5 +
+ drivers/irqchip/Makefile | 1 +
+ drivers/irqchip/irq-econet-en751221.c | 309 ++++++++++++++++++++++++++
+ 3 files changed, 315 insertions(+)
+ create mode 100644 drivers/irqchip/irq-econet-en751221.c
+
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -147,6 +147,11 @@ config DW_APB_ICTL
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+
++config ECONET_EN751221_INTC
++ bool
++ select GENERIC_IRQ_CHIP
++ select IRQ_DOMAIN
++
+ config FARADAY_FTINTC010
+ bool
+ select IRQ_DOMAIN
+--- a/drivers/irqchip/Makefile
++++ b/drivers/irqchip/Makefile
+@@ -10,6 +10,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm28
+ obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o
+ obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
+ obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
++obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o
+ obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
+ obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
+ obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
+--- /dev/null
++++ b/drivers/irqchip/irq-econet-en751221.c
+@@ -0,0 +1,309 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * EN751221 Interrupt Controller Driver.
++ *
++ * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
++ * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
++ * be routed to either VPE but not both, so to support per-CPU interrupts, a
++ * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In
++ * this driver, these are called "shadow interrupts". The assignment of shadow
++ * interrupts is defined by the SoC integrator when wiring the interrupt lines,
++ * so they are configurable in the device tree.
++ *
++ * If an interrupt (say 30) needs per-CPU capability, the SoC integrator
++ * allocates another IRQ number (say 29) to be its shadow. The device tree
++ * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts"
++ * property.
++ *
++ * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29,
++ * telling the hardware to mask VPE#1's view of IRQ 30.
++ *
++ */
++
++#include <linux/cleanup.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/irqchip.h>
++#include <linux/irqchip/chained_irq.h>
++
++#define IRQ_COUNT 40
++
++#define NOT_PERCPU 0xff
++#define IS_SHADOW 0xfe
++
++#define REG_MASK0 0x04
++#define REG_MASK1 0x50
++#define REG_PENDING0 0x08
++#define REG_PENDING1 0x54
++
++/**
++ * @membase: Base address of the interrupt controller registers
++ * @interrupt_shadows: Array of all interrupts, for each value,
++ * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow
++ * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt
++ * - else: This is a per-cpu interrupt whose shadow is the value
++ */
++static struct {
++ void __iomem *membase;
++ u8 interrupt_shadows[IRQ_COUNT];
++} econet_intc __ro_after_init;
++
++static DEFINE_RAW_SPINLOCK(irq_lock);
++
++/* IRQs must be disabled */
++static void econet_wreg(u32 reg, u32 val, u32 mask)
++{
++ u32 v;
++
++ guard(raw_spinlock)(&irq_lock);
++
++ v = ioread32(econet_intc.membase + reg);
++ v &= ~mask;
++ v |= val & mask;
++ iowrite32(v, econet_intc.membase + reg);
++}
++
++/* IRQs must be disabled */
++static void econet_chmask(u32 hwirq, bool unmask)
++{
++ u32 reg, mask;
++ u8 shadow;
++
++ /*
++ * If the IRQ is a shadow, it should never be manipulated directly.
++ * It should only be masked/unmasked as a result of the "real" per-cpu
++ * irq being manipulated by a thread running on VPE#1.
++ * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask.
++ * This is a single-core, two-VPE system, so smp_processor_id() never exceeds 1.
++ */
++ shadow = econet_intc.interrupt_shadows[hwirq];
++ if (WARN_ON_ONCE(shadow == IS_SHADOW))
++ return;
++ else if (shadow != NOT_PERCPU && smp_processor_id() == 1)
++ hwirq = shadow;
++
++ if (hwirq >= 32) {
++ reg = REG_MASK1;
++ mask = BIT(hwirq - 32);
++ } else {
++ reg = REG_MASK0;
++ mask = BIT(hwirq);
++ }
++
++ econet_wreg(reg, unmask ? mask : 0, mask);
++}
++
++/* IRQs must be disabled */
++static void econet_intc_mask(struct irq_data *d)
++{
++ econet_chmask(d->hwirq, false);
++}
++
++/* IRQs must be disabled */
++static void econet_intc_unmask(struct irq_data *d)
++{
++ econet_chmask(d->hwirq, true);
++}
++
++static void econet_mask_all(void)
++{
++ /* IRQs are generally disabled during init, but guarding here makes it non-obligatory. */
++ guard(irqsave)();
++ econet_wreg(REG_MASK0, 0, ~0);
++ econet_wreg(REG_MASK1, 0, ~0);
++}
++
++static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset)
++{
++ int hwirq;
++
++ while (pending) {
++ hwirq = fls(pending) - 1;
++ generic_handle_domain_irq(d, hwirq + offset);
++ pending &= ~BIT(hwirq);
++ }
++}
++
++static void econet_intc_from_parent(struct irq_desc *desc)
++{
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++ struct irq_domain *domain;
++ u32 pending0, pending1;
++
++ chained_irq_enter(chip, desc);
++
++ pending0 = ioread32(econet_intc.membase + REG_PENDING0);
++ pending1 = ioread32(econet_intc.membase + REG_PENDING1);
++
++ if (unlikely(!(pending0 | pending1))) {
++ spurious_interrupt();
++ } else {
++ domain = irq_desc_get_handler_data(desc);
++ econet_intc_handle_pending(domain, pending0, 0);
++ econet_intc_handle_pending(domain, pending1, 32);
++ }
++
++ chained_irq_exit(chip, desc);
++}
++
++static const struct irq_chip econet_irq_chip;
++
++static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq)
++{
++ int ret;
++
++ if (hwirq >= IRQ_COUNT) {
++ pr_err("%s: hwirq %lu out of range\n", __func__, hwirq);
++ return -EINVAL;
++ } else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) {
++ pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq);
++ return -EINVAL;
++ }
++
++ if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) {
++ irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq);
++ } else {
++ irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq);
++ ret = irq_set_percpu_devid(irq);
++ if (ret)
++ pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret);
++ }
++
++ irq_set_chip_data(irq, NULL);
++ return 0;
++}
++
++static const struct irq_chip econet_irq_chip = {
++ .name = "en751221-intc",
++ .irq_unmask = econet_intc_unmask,
++ .irq_mask = econet_intc_mask,
++ .irq_mask_ack = econet_intc_mask,
++};
++
++static const struct irq_domain_ops econet_domain_ops = {
++ .xlate = irq_domain_xlate_onecell,
++ .map = econet_intc_map
++};
++
++static int __init get_shadow_interrupts(struct device_node *node)
++{
++ const char *field = "econet,shadow-interrupts";
++ int num_shadows;
++
++ num_shadows = of_property_count_u32_elems(node, field);
++
++ memset(econet_intc.interrupt_shadows, NOT_PERCPU,
++ sizeof(econet_intc.interrupt_shadows));
++
++ if (num_shadows <= 0) {
++ return 0;
++ } else if (num_shadows % 2) {
++ pr_err("%pOF: %s count is odd, ignoring\n", node, field);
++ return 0;
++ }
++
++ u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL);
++ if (!shadows)
++ return -ENOMEM;
++
++ if (of_property_read_u32_array(node, field, shadows, num_shadows)) {
++ pr_err("%pOF: Failed to read %s\n", node, field);
++ return -EINVAL;
++ }
++
++ for (int i = 0; i < num_shadows; i += 2) {
++ u32 shadow = shadows[i + 1];
++ u32 target = shadows[i];
++
++ if (shadow >= IRQ_COUNT) {
++ pr_err("%pOF: %s[%d] shadow(%d) out of range\n",
++ node, field, i + 1, shadow);
++ continue;
++ }
++
++ if (target >= IRQ_COUNT) {
++ pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target);
++ continue;
++ }
++
++ if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) {
++ pr_err("%pOF: %s[%d] target(%d) already has a shadow\n",
++ node, field, i, target);
++ continue;
++ }
++
++ if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) {
++ pr_err("%pOF: %s[%d] shadow(%d) already has a target\n",
++ node, field, i + 1, shadow);
++ continue;
++ }
++
++ econet_intc.interrupt_shadows[target] = shadow;
++ econet_intc.interrupt_shadows[shadow] = IS_SHADOW;
++ }
++
++ return 0;
++}
++
++static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent)
++{
++ struct irq_domain *domain;
++ struct resource res;
++ int ret, irq;
++
++ ret = get_shadow_interrupts(node);
++ if (ret)
++ return ret;
++
++ irq = irq_of_parse_and_map(node, 0);
++ if (!irq) {
++ pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node);
++ return -EINVAL;
++ }
++
++ if (of_address_to_resource(node, 0, &res)) {
++ pr_err("%pOF: DT: Failed to get 'reg'\n", node);
++ ret = -EINVAL;
++ goto err_dispose_mapping;
++ }
++
++ if (!request_mem_region(res.start, resource_size(&res), res.name)) {
++ pr_err("%pOF: Failed to request memory\n", node);
++ ret = -EBUSY;
++ goto err_dispose_mapping;
++ }
++
++ econet_intc.membase = ioremap(res.start, resource_size(&res));
++ if (!econet_intc.membase) {
++ pr_err("%pOF: Failed to remap membase\n", node);
++ ret = -ENOMEM;
++ goto err_release;
++ }
++
++ econet_mask_all();
++
++ domain = irq_domain_add_linear(node, IRQ_COUNT, &econet_domain_ops, NULL);
++ if (!domain) {
++ pr_err("%pOF: Failed to add irqdomain\n", node);
++ ret = -ENOMEM;
++ goto err_unmap;
++ }
++
++ irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain);
++
++ return 0;
++
++err_unmap:
++ iounmap(econet_intc.membase);
++err_release:
++ release_mem_region(res.start, resource_size(&res));
++err_dispose_mapping:
++ irq_dispose_mapping(irq);
++ return ret;
++}
++
++IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init);
--- /dev/null
+From 9e0dd98654a528735d2b363d0dc73f7904108652 Mon Sep 17 00:00:00 2001
+Date: Sun, 30 Mar 2025 17:02:57 +0000
+Subject: [PATCH] dt-bindings: vendor-prefixes: Add EcoNet
+
+Add the "econet" vendor prefix for SoC maker
+
+---
+ Documentation/devicetree/bindings/vendor-prefixes.yaml | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
++++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+@@ -420,6 +420,8 @@ patternProperties:
+ description: EBV Elektronik
+ "^eckelmann,.*":
+ description: Eckelmann AG
++ "^econet,.*":
++ description: EcoNet (HK) Limited
+ "^edgeble,.*":
+ description: Edgeble AI Technologies Pvt. Ltd.
+ "^edimax,.*":
--- /dev/null
+From 30fddbd5325459102e448c9a26a1bc15ef563381 Mon Sep 17 00:00:00 2001
+Date: Wed, 7 May 2025 13:44:54 +0000
+Subject: [PATCH] dt-bindings: timer: Add EcoNet EN751221 "HPT" CPU Timer
+
+Add device tree bindings for the so-called high-precision timer (HPT)
+in the EcoNet EN751221 SoC.
+
+---
+ .../bindings/timer/econet,en751221-timer.yaml | 80 +++++++++++++++++++
+ 1 file changed, 80 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/timer/econet,en751221-timer.yaml
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/timer/econet,en751221-timer.yaml
+@@ -0,0 +1,80 @@
++# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/timer/econet,en751221-timer.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: EcoNet EN751221 High Precision Timer (HPT)
++
++maintainers:
++
++description:
++ The EcoNet High Precision Timer (HPT) is a timer peripheral found in various
++ EcoNet SoCs, including the EN751221 and EN751627 families. It provides per-VPE
++ count/compare registers and a per-CPU control register, with a single interrupt
++ line using a percpu-devid interrupt mechanism.
++
++properties:
++ compatible:
++ oneOf:
++ - const: econet,en751221-timer
++ - items:
++ - const: econet,en751627-timer
++ - const: econet,en751221-timer
++
++ reg:
++ minItems: 1
++ maxItems: 2
++
++ interrupts:
++ maxItems: 1
++ description: A percpu-devid timer interrupt shared across CPUs.
++
++ clocks:
++ maxItems: 1
++
++required:
++ - compatible
++ - reg
++ - interrupts
++ - clocks
++
++allOf:
++ - if:
++ properties:
++ compatible:
++ contains:
++ const: econet,en751627-timer
++ then:
++ properties:
++ reg:
++ items:
++ - description: VPE timers 0 and 1
++ - description: VPE timers 2 and 3
++ else:
++ properties:
++ reg:
++ items:
++ - description: VPE timers 0 and 1
++
++additionalProperties: false
++
++examples:
++ - |
++ timer@1fbf0400 {
++ compatible = "econet,en751627-timer", "econet,en751221-timer";
++ reg = <0x1fbf0400 0x100>, <0x1fbe0000 0x100>;
++ interrupt-parent = <&intc>;
++ interrupts = <30>;
++ clocks = <&hpt_clock>;
++ };
++ - |
++ timer@1fbf0400 {
++ compatible = "econet,en751221-timer";
++ reg = <0x1fbe0400 0x100>;
++ interrupt-parent = <&intc>;
++ interrupts = <30>;
++ clocks = <&hpt_clock>;
++ };
++...
--- /dev/null
+From 3b4c33ac87d0d11308f4445ecec2a124e2e77724 Mon Sep 17 00:00:00 2001
+Date: Wed, 7 May 2025 13:44:55 +0000
+Subject: [PATCH] clocksource/drivers: Add EcoNet Timer HPT driver
+
+Introduce a clocksource driver for the so-called high-precision timer (HPT)
+in the EcoNet EN751221 and EN751627 MIPS SoCs.
+
+It's a 32 bit upward-counting one-shot timer which relies on the crystal so it
+is unaffected by CPU power mode. On MIPS 34K devices (single core) there is
+one timer, and on 1004K devices (dual core) there are two.
+
+Each timer has two sets of count/compare registers so that there is one for
+each of the VPEs on the core. Because each core has 2 VPEs, register selection
+takes the CPU number / 2 for the timer corresponding to the core, then CPU
+number % 2 for the register corresponding to the VPE.
+
+These timers use a percpu-devid IRQ to route interrupts to the VPE which set
+the event.
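+
+To illustrate (this sketch is not part of the driver), the address
+arithmetic mirrors the reg_compare()/reg_count() helpers added below; the
+block base addresses are only examples taken from the binding example:
+
+  #include <stdio.h>
+
+  /* Register order within a block: CTL, CMR0, CNT0, CMR1, CNT1 */
+  static unsigned int compare_offset(unsigned int cpu)
+  {
+          return (cpu % 2) * 0x08 + 0x04;   /* CMR0 or CMR1 */
+  }
+
+  static unsigned int count_offset(unsigned int cpu)
+  {
+          return (cpu % 2) * 0x08 + 0x08;   /* CNT0 or CNT1 */
+  }
+
+  int main(void)
+  {
+          /* Example per-core block bases (cf. the EN751627 binding example) */
+          unsigned int block_base[2] = { 0x1fbf0400, 0x1fbe0000 };
+          unsigned int cpu;
+
+          for (cpu = 0; cpu < 4; cpu++)
+                  printf("cpu %u -> block %u, compare %#x, count %#x\n",
+                         cpu, cpu / 2,
+                         block_base[cpu / 2] + compare_offset(cpu),
+                         block_base[cpu / 2] + count_offset(cpu));
+          return 0;
+  }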
+
+---
+ drivers/clocksource/Kconfig | 8 +
+ drivers/clocksource/Makefile | 1 +
+ drivers/clocksource/timer-econet-en751221.c | 216 ++++++++++++++++++++
+ 3 files changed, 225 insertions(+)
+ create mode 100644 drivers/clocksource/timer-econet-en751221.c
+
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -73,6 +73,14 @@ config DW_APB_TIMER_OF
+ select DW_APB_TIMER
+ select TIMER_OF
+
++config ECONET_EN751221_TIMER
++ bool "EcoNet EN751221 High Precision Timer" if COMPILE_TEST
++ depends on HAS_IOMEM
++ select CLKSRC_MMIO
++ select TIMER_OF
++ help
++ Support for the CPU timer found on EcoNet MIPS-based SoCs.
++
+ config FTTMR010_TIMER
+ bool "Faraday Technology timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+--- a/drivers/clocksource/Makefile
++++ b/drivers/clocksource/Makefile
+@@ -17,6 +17,7 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
+ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
+ obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
+ obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
++obj-$(CONFIG_ECONET_EN751221_TIMER) += timer-econet-en751221.o
+ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
+ obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o
+ obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
+--- /dev/null
++++ b/drivers/clocksource/timer-econet-en751221.c
+@@ -0,0 +1,216 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Timer present on EcoNet EN75xx MIPS based SoCs.
++ *
++ */
++
++#include <linux/io.h>
++#include <linux/cpumask.h>
++#include <linux/interrupt.h>
++#include <linux/clockchips.h>
++#include <linux/sched_clock.h>
++#include <linux/of.h>
++#include <linux/of_irq.h>
++#include <linux/of_address.h>
++#include <linux/cpuhotplug.h>
++#include <linux/clk.h>
++
++#define ECONET_BITS 32
++#define ECONET_MIN_DELTA 0x00001000
++#define ECONET_MAX_DELTA GENMASK(ECONET_BITS - 2, 0)
++/* 34Kc hardware has 1 block and 1004Kc has 2. */
++#define ECONET_NUM_BLOCKS DIV_ROUND_UP(NR_CPUS, 2)
++
++static struct {
++ void __iomem *membase[ECONET_NUM_BLOCKS];
++ u32 freq_hz;
++} econet_timer __ro_after_init;
++
++static DEFINE_PER_CPU(struct clock_event_device, econet_timer_pcpu);
++
++/* Each memory block has 2 timers; the order of registers is:
++ * CTL, CMR0, CNT0, CMR1, CNT1
++ */
++static inline void __iomem *reg_ctl(u32 timer_n)
++{
++ return econet_timer.membase[timer_n >> 1];
++}
++
++static inline void __iomem *reg_compare(u32 timer_n)
++{
++ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x04;
++}
++
++static inline void __iomem *reg_count(u32 timer_n)
++{
++ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x08;
++}
++
++static inline u32 ctl_bit_enabled(u32 timer_n)
++{
++ return 1U << (timer_n & 1);
++}
++
++static inline u32 ctl_bit_pending(u32 timer_n)
++{
++ return 1U << ((timer_n & 1) + 16);
++}
++
++static bool cevt_is_pending(int cpu_id)
++{
++ return ioread32(reg_ctl(cpu_id)) & ctl_bit_pending(cpu_id);
++}
++
++static irqreturn_t cevt_interrupt(int irq, void *dev_id)
++{
++ struct clock_event_device *dev = this_cpu_ptr(&econet_timer_pcpu);
++ int cpu = cpumask_first(dev->cpumask);
++
++ /* Each VPE has its own events,
++ * so this will only happen on spurious interrupt.
++ */
++ if (!cevt_is_pending(cpu))
++ return IRQ_NONE;
++
++ iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
++ dev->event_handler(dev);
++ return IRQ_HANDLED;
++}
++
++static int cevt_set_next_event(ulong delta, struct clock_event_device *dev)
++{
++ u32 next;
++ int cpu;
++
++ cpu = cpumask_first(dev->cpumask);
++ next = ioread32(reg_count(cpu)) + delta;
++ iowrite32(next, reg_compare(cpu));
++
++ if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
++ return -ETIME;
++
++ return 0;
++}
++
++static int cevt_init_cpu(uint cpu)
++{
++ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
++ u32 reg;
++
++ pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);
++
++ reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
++ iowrite32(reg, reg_ctl(cpu));
++
++ enable_percpu_irq(cd->irq, IRQ_TYPE_NONE);
++
++ /* Do this last because it synchronously configures the timer */
++ clockevents_config_and_register(cd, econet_timer.freq_hz,
++ ECONET_MIN_DELTA, ECONET_MAX_DELTA);
++
++ return 0;
++}
++
++static u64 notrace sched_clock_read(void)
++{
++ /* Always read from clock zero no matter the CPU */
++ return (u64)ioread32(reg_count(0));
++}
++
++/* Init */
++
++static void __init cevt_dev_init(uint cpu)
++{
++ iowrite32(0, reg_count(cpu));
++ iowrite32(U32_MAX, reg_compare(cpu));
++}
++
++static int __init cevt_init(struct device_node *np)
++{
++ int i, irq, ret;
++
++ irq = irq_of_parse_and_map(np, 0);
++ if (irq <= 0) {
++ pr_err("%pOFn: irq_of_parse_and_map failed\n", np);
++ return -EINVAL;
++ }
++
++ ret = request_percpu_irq(irq, cevt_interrupt, np->name, &econet_timer_pcpu);
++
++ if (ret < 0) {
++ pr_err("%pOFn: IRQ %d setup failed (%d)\n", np, irq, ret);
++ goto err_unmap_irq;
++ }
++
++ for_each_possible_cpu(i) {
++ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
++
++ cd->rating = 310;
++ cd->features = CLOCK_EVT_FEAT_ONESHOT |
++ CLOCK_EVT_FEAT_C3STOP |
++ CLOCK_EVT_FEAT_PERCPU;
++ cd->set_next_event = cevt_set_next_event;
++ cd->irq = irq;
++ cd->cpumask = cpumask_of(i);
++ cd->name = np->name;
++
++ cevt_dev_init(i);
++ }
++
++ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
++ "clockevents/econet/timer:starting",
++ cevt_init_cpu, NULL);
++ return 0;
++
++err_unmap_irq:
++ irq_dispose_mapping(irq);
++ return ret;
++}
++
++static int __init timer_init(struct device_node *np)
++{
++ int num_blocks = DIV_ROUND_UP(num_possible_cpus(), 2);
++ struct clk *clk;
++ int ret;
++
++ clk = of_clk_get(np, 0);
++ if (IS_ERR(clk)) {
++ pr_err("%pOFn: Failed to get CPU clock from DT %ld\n", np, PTR_ERR(clk));
++ return PTR_ERR(clk);
++ }
++
++ econet_timer.freq_hz = clk_get_rate(clk);
++
++ for (int i = 0; i < num_blocks; i++) {
++ econet_timer.membase[i] = of_iomap(np, i);
++ if (!econet_timer.membase[i]) {
++ pr_err("%pOFn: failed to map register [%d]\n", np, i);
++ return -ENXIO;
++ }
++ }
++
++ /* For clocksource purposes always read clock zero, whatever the CPU */
++ ret = clocksource_mmio_init(reg_count(0), np->name,
++ econet_timer.freq_hz, 301, ECONET_BITS,
++ clocksource_mmio_readl_up);
++ if (ret) {
++ pr_err("%pOFn: clocksource_mmio_init failed: %d\n", np, ret);
++ return ret;
++ }
++
++ ret = cevt_init(np);
++ if (ret < 0)
++ return ret;
++
++ sched_clock_register(sched_clock_read, ECONET_BITS,
++ econet_timer.freq_hz);
++
++ pr_info("%pOFn: using %u.%03u MHz high precision timer\n", np,
++ econet_timer.freq_hz / 1000000,
++ (econet_timer.freq_hz / 1000) % 1000);
++
++ return 0;
++}
++
++TIMER_OF_DECLARE(econet_timer_hpt, "econet,en751221-timer", timer_init);
--- /dev/null
+From be8b4173719a61fdd8379e86895d855775cf5f91 Mon Sep 17 00:00:00 2001
+Date: Wed, 7 May 2025 13:44:56 +0000
+Subject: [PATCH] dt-bindings: mips: Add EcoNet platform binding
+
+Document the top-level device tree binding for EcoNet MIPS-based SoCs.
+
+---
+ .../devicetree/bindings/mips/econet.yaml | 26 +++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/mips/econet.yaml
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mips/econet.yaml
+@@ -0,0 +1,26 @@
++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/mips/econet.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: EcoNet MIPS SoCs
++
++maintainers:
++
++properties:
++ $nodename:
++ const: '/'
++
++ compatible:
++ oneOf:
++ - description: Boards with EcoNet EN751221 family SoC
++ items:
++ - enum:
++ - smartfiber,xp8421-b
++ - const: econet,en751221
++
++additionalProperties: true
++
++...
--- /dev/null
+From 35fb26f94dfa1b291086b84b2421f957214824d1 Mon Sep 17 00:00:00 2001
+Date: Wed, 7 May 2025 13:44:57 +0000
+Subject: [PATCH] mips: Add EcoNet MIPS platform support
+
+Add platform support for EcoNet MIPS SoCs.
+
+---
+ arch/mips/Kbuild.platforms | 1 +
+ arch/mips/Kconfig | 25 +++++++++
+ arch/mips/boot/compressed/uart-16550.c | 5 ++
+ arch/mips/econet/Kconfig | 37 ++++++++++++
+ arch/mips/econet/Makefile | 2 +
+ arch/mips/econet/Platform | 5 ++
+ arch/mips/econet/init.c | 78 ++++++++++++++++++++++++++
+ 7 files changed, 153 insertions(+)
+ create mode 100644 arch/mips/econet/Kconfig
+ create mode 100644 arch/mips/econet/Makefile
+ create mode 100644 arch/mips/econet/Platform
+ create mode 100644 arch/mips/econet/init.c
+
+--- a/arch/mips/Kbuild.platforms
++++ b/arch/mips/Kbuild.platforms
+@@ -11,6 +11,7 @@ platform-$(CONFIG_CAVIUM_OCTEON_SOC) +=
+ platform-$(CONFIG_EYEQ) += mobileye/
+ platform-$(CONFIG_MIPS_COBALT) += cobalt/
+ platform-$(CONFIG_MACH_DECSTATION) += dec/
++platform-$(CONFIG_ECONET) += econet/
+ platform-$(CONFIG_MIPS_GENERIC) += generic/
+ platform-$(CONFIG_MACH_JAZZ) += jazz/
+ platform-$(CONFIG_LANTIQ) += lantiq/
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -388,6 +388,30 @@ config MACH_DECSTATION
+
+ otherwise choose R3000.
+
++config ECONET
++ bool "EcoNet MIPS family"
++ select BOOT_RAW
++ select CPU_BIG_ENDIAN
++ select DEBUG_ZBOOT
++ select EARLY_PRINTK_8250
++ select ECONET_EN751221_TIMER
++ select SERIAL_OF_PLATFORM
++ select SYS_SUPPORTS_BIG_ENDIAN
++ select SYS_HAS_CPU_MIPS32_R1
++ select SYS_HAS_CPU_MIPS32_R2
++ select SYS_HAS_EARLY_PRINTK
++ select SYS_SUPPORTS_32BIT_KERNEL
++ select SYS_SUPPORTS_MIPS16
++ select SYS_SUPPORTS_ZBOOT_UART16550
++ select USE_GENERIC_EARLY_PRINTK_8250
++ select USE_OF
++ help
++ EcoNet EN75xx MIPS devices are big endian MIPS machines used
++ in XPON (fiber) and DSL applications. They have SPI, PCI, USB,
++ GPIO, and Ethernet, with optional XPON, DSL, and VoIP DSP cores.
++ Don't confuse these with the Airoha ARM devices sometimes referred
++ to as "EcoNet"; this family is for MIPS-based devices only.
++
+ config MACH_JAZZ
+ bool "Jazz family of machines"
+ select ARC_MEMORY
+@@ -1017,6 +1041,7 @@ source "arch/mips/ath79/Kconfig"
+ source "arch/mips/bcm47xx/Kconfig"
+ source "arch/mips/bcm63xx/Kconfig"
+ source "arch/mips/bmips/Kconfig"
++source "arch/mips/econet/Kconfig"
+ source "arch/mips/generic/Kconfig"
+ source "arch/mips/ingenic/Kconfig"
+ source "arch/mips/jazz/Kconfig"
+--- a/arch/mips/boot/compressed/uart-16550.c
++++ b/arch/mips/boot/compressed/uart-16550.c
+@@ -20,6 +20,11 @@
+ #define PORT(offset) (CKSEG1ADDR(INGENIC_UART_BASE_ADDR) + (4 * offset))
+ #endif
+
++#ifdef CONFIG_ECONET
++#define EN75_UART_BASE 0x1fbf0003
++#define PORT(offset) (CKSEG1ADDR(EN75_UART_BASE) + (4 * (offset)))
++#endif
++
+ #ifndef IOTYPE
+ #define IOTYPE char
+ #endif
+--- /dev/null
++++ b/arch/mips/econet/Kconfig
+@@ -0,0 +1,37 @@
++# SPDX-License-Identifier: GPL-2.0
++if ECONET
++
++choice
++ prompt "EcoNet SoC selection"
++ default SOC_ECONET_EN751221
++ help
++ Select EcoNet MIPS SoC type. Individual SoCs within a family are
++ very similar, so it is enough to select the right family, and
++ then customize to the specific SoC using the device tree only.
++
++ config SOC_ECONET_EN751221
++ bool "EN751221 family"
++ select COMMON_CLK
++ select ECONET_EN751221_INTC
++ select IRQ_MIPS_CPU
++ select SMP
++ select SMP_UP
++ select SYS_SUPPORTS_SMP
++ help
++ The EN751221 family includes the EN7512, EN7513, EN7521, and EN7526.
++ They are based on single-core MIPS 34Kc processors. To boot
++ this kernel, you will need a device tree (for example appended
++ via MIPS_RAW_APPENDED_DTB=y) and a root filesystem.
++endchoice
++
++choice
++ prompt "Devicetree selection"
++ default DTB_ECONET_NONE
++ help
++ Select the devicetree.
++
++ config DTB_ECONET_NONE
++ bool "None"
++endchoice
++
++endif
+--- /dev/null
++++ b/arch/mips/econet/Makefile
+@@ -0,0 +1,2 @@
++
++obj-y := init.o
+--- /dev/null
++++ b/arch/mips/econet/Platform
+@@ -0,0 +1,5 @@
++# To address a 7.2MB kernel size limit in the EcoNet SDK bootloader,
++# we put the load address well above where the bootloader loads and then use
++# zboot. So please set CONFIG_ZBOOT_LOAD_ADDRESS to the address where your
++# bootloader actually places the kernel.
++load-$(CONFIG_ECONET) += 0xffffffff81000000
+--- /dev/null
++++ b/arch/mips/econet/init.c
+@@ -0,0 +1,78 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * EcoNet setup code
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/of_clk.h>
++#include <linux/irqchip.h>
++
++#include <asm/addrspace.h>
++#include <asm/io.h>
++#include <asm/bootinfo.h>
++#include <asm/time.h>
++#include <asm/prom.h>
++#include <asm/smp-ops.h>
++#include <asm/reboot.h>
++
++#define CR_AHB_RSTCR ((void __iomem *)CKSEG1ADDR(0x1fb00040))
++#define RESET BIT(31)
++
++#define UART_BASE CKSEG1ADDR(0x1fbf0003)
++#define UART_REG_SHIFT 2
++
++static void hw_reset(char *command)
++{
++ iowrite32(RESET, CR_AHB_RSTCR);
++}
++
++/* 1. Bring up early printk. */
++void __init prom_init(void)
++{
++ setup_8250_early_printk_port(UART_BASE, UART_REG_SHIFT, 0);
++ _machine_restart = hw_reset;
++}
++
++/* 2. Parse the DT and find memory */
++void __init plat_mem_setup(void)
++{
++ void *dtb;
++
++ set_io_port_base(KSEG1);
++
++ dtb = get_fdt();
++ if (!dtb)
++ panic("no dtb found");
++
++ __dt_setup_arch(dtb);
++
++ early_init_dt_scan_memory();
++}
++
++/* 3. Overload __weak device_tree_init(), add SMP_UP ops */
++void __init device_tree_init(void)
++{
++ unflatten_and_copy_device_tree();
++
++ register_up_smp_ops();
++}
++
++const char *get_system_type(void)
++{
++ return "EcoNet-EN75xx";
++}
++
++/* 4. Initialize the IRQ subsystem */
++void __init arch_init_irq(void)
++{
++ irqchip_init();
++}
++
++/* 5. Timers */
++void __init plat_time_init(void)
++{
++ of_clk_init(NULL);
++ timer_probe();
++}
--- /dev/null
+From abc2d0bc2cb7c1412b8b254c0446f94b3e203c7c Mon Sep 17 00:00:00 2001
+Date: Wed, 7 May 2025 13:44:58 +0000
+Subject: [PATCH] dt-bindings: vendor-prefixes: Add SmartFiber
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add "smartfiber" vendor prefix for manufactorer of EcoNet based boards.
+
+---
+ Documentation/devicetree/bindings/vendor-prefixes.yaml | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
++++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+@@ -1368,6 +1368,8 @@ patternProperties:
+ description: SKOV A/S
+ "^skyworks,.*":
+ description: Skyworks Solutions, Inc.
++ "^smartfiber,.*":
++ description: ShenZhen Smartfiber Technology Co, Ltd.
+ "^smartlabs,.*":
+ description: SmartLabs LLC
+ "^smartrg,.*":
--- /dev/null
+From 0ec4887009729297f7c10368084e41a8a9fbbd0e Mon Sep 17 00:00:00 2001
+Date: Wed, 7 May 2025 13:44:59 +0000
+Subject: [PATCH] mips: dts: Add EcoNet DTS with EN751221 and SmartFiber
+ XP8421-B board
+
+Add DTS files in support of the EcoNet platform, including the SmartFiber
+XP8421-B, a low-cost, commercially available board based on the EN751221.
+
+---
+ arch/mips/boot/dts/Makefile | 1 +
+ arch/mips/boot/dts/econet/Makefile | 2 +
+ arch/mips/boot/dts/econet/en751221.dtsi | 67 +++++++++++++++++++
+ .../econet/en751221_smartfiber_xp8421-b.dts | 19 ++++++
+ arch/mips/econet/Kconfig | 11 +++
+ 5 files changed, 100 insertions(+)
+ create mode 100644 arch/mips/boot/dts/econet/Makefile
+ create mode 100644 arch/mips/boot/dts/econet/en751221.dtsi
+ create mode 100644 arch/mips/boot/dts/econet/en751221_smartfiber_xp8421-b.dts
+
+--- a/arch/mips/boot/dts/Makefile
++++ b/arch/mips/boot/dts/Makefile
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ subdir-$(CONFIG_BMIPS_GENERIC) += brcm
+ subdir-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon
++subdir-$(CONFIG_ECONET) += econet
+ subdir-$(CONFIG_EYEQ) += mobileye
+ subdir-$(CONFIG_FIT_IMAGE_FDT_MARDUK) += img
+ subdir-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += img
+--- /dev/null
++++ b/arch/mips/boot/dts/econet/Makefile
+@@ -0,0 +1,2 @@
++# SPDX-License-Identifier: GPL-2.0
++dtb-$(CONFIG_DTB_ECONET_SMARTFIBER_XP8421_B) += en751221_smartfiber_xp8421-b.dtb
+--- /dev/null
++++ b/arch/mips/boot/dts/econet/en751221.dtsi
+@@ -0,0 +1,67 @@
++// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++/dts-v1/;
++
++/ {
++ compatible = "econet,en751221";
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ hpt_clock: clock {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <200000000>; /* 200 MHz */
++ };
++
++ cpus: cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "mips,mips24KEc";
++ reg = <0>;
++ };
++ };
++
++ cpuintc: interrupt-controller {
++ compatible = "mti,cpu-interrupt-controller";
++ interrupt-controller;
++ #address-cells = <0>;
++ #interrupt-cells = <1>;
++ };
++
++ intc: interrupt-controller@1fb40000 {
++ compatible = "econet,en751221-intc";
++ reg = <0x1fb40000 0x100>;
++ interrupt-parent = <&cpuintc>;
++ interrupts = <2>;
++
++ interrupt-controller;
++ #interrupt-cells = <1>;
++ econet,shadow-interrupts = <7 2>, <8 3>, <13 12>, <30 29>;
++ };
++
++ uart: serial@1fbf0000 {
++ compatible = "ns16550";
++ reg = <0x1fbf0000 0x30>;
++ reg-io-width = <4>;
++ reg-shift = <2>;
++ interrupt-parent = <&intc>;
++ interrupts = <0>;
++ /*
++ * Conversion of baud rate to clock frequency requires a
++ * computation that is not in the ns16550 driver, so this
++ * uart is fixed at 115200 baud.
++ */
++ clock-frequency = <1843200>;
++ };
++
++ timer_hpt: timer@1fbf0400 {
++ compatible = "econet,en751221-timer";
++ reg = <0x1fbf0400 0x100>;
++
++ interrupt-parent = <&intc>;
++ interrupts = <30>;
++ clocks = <&hpt_clock>;
++ };
++};
+--- /dev/null
++++ b/arch/mips/boot/dts/econet/en751221_smartfiber_xp8421-b.dts
+@@ -0,0 +1,19 @@
++// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++/dts-v1/;
++
++#include "en751221.dtsi"
++
++/ {
++ model = "SmartFiber XP8421-B";
++ compatible = "smartfiber,xp8421-b", "econet,en751221";
++
++ memory@0 {
++ device_type = "memory";
++ reg = <0x00000000 0x1c000000>;
++ };
++
++ chosen {
++ stdout-path = "/serial@1fbf0000:115200";
++ linux,usable-memory-range = <0x00020000 0x1bfe0000>;
++ };
++};
+--- a/arch/mips/econet/Kconfig
++++ b/arch/mips/econet/Kconfig
+@@ -32,6 +32,17 @@ choice
+
+ config DTB_ECONET_NONE
+ bool "None"
++
++ config DTB_ECONET_SMARTFIBER_XP8421_B
++ bool "EN751221 SmartFiber XP8421-B"
++ depends on SOC_ECONET_EN751221
++ select BUILTIN_DTB
++ help
++ The SmartFiber XP8421-B is a device based on the EN751221 SoC.
++ It has 512MB of memory and 256MB of NAND flash. This kernel
++ needs only an appended initramfs to boot. It can be loaded
++ through XMODEM and booted from memory in the bootloader, or
++ it can be packed in tclinux.trx format and written to flash.
+ endchoice
+
+ endif
--- /dev/null
+From faefb0a59c5914b7b8f737e2ec5c82822e5bc4c7 Mon Sep 17 00:00:00 2001
+Date: Wed, 7 May 2025 13:45:00 +0000
+Subject: [PATCH] MAINTAINERS: Add entry for newly added EcoNet platform
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add a MAINTAINERS entry as part of integration of the EcoNet MIPS platform.
+
+---
+ MAINTAINERS | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -8019,6 +8019,18 @@ W: https://linuxtv.org
+ Q: http://patchwork.linuxtv.org/project/linux-media/list/
+ F: drivers/media/dvb-frontends/ec100*
+
++ECONET MIPS PLATFORM
++S: Maintained
++F: Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml
++F: Documentation/devicetree/bindings/mips/econet.yaml
++F: Documentation/devicetree/bindings/timer/econet,en751221-timer.yaml
++F: arch/mips/boot/dts/econet/
++F: arch/mips/econet/
++F: drivers/clocksource/timer-econet-en751221.c
++F: drivers/irqchip/irq-econet-en751221.c
++
+ ECRYPT FILE SYSTEM
--- /dev/null
+From 79ee1d20e37cd553cc961962fca8107e69a0c293 Mon Sep 17 00:00:00 2001
+Date: Wed, 21 May 2025 21:33:33 +0000
+Subject: [PATCH] mips: econet: Fix incorrect Kconfig dependencies
+
+config ECONET selects SERIAL_OF_PLATFORM, which depends on SERIAL_8250,
+so SERIAL_8250 needs to be selected directly as well.
+Also, do not enable DEBUG_ZBOOT unless DEBUG_KERNEL is set.
+
+---
+ arch/mips/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -392,9 +392,10 @@ config ECONET
+ bool "EcoNet MIPS family"
+ select BOOT_RAW
+ select CPU_BIG_ENDIAN
+- select DEBUG_ZBOOT
++ select DEBUG_ZBOOT if DEBUG_KERNEL
+ select EARLY_PRINTK_8250
+ select ECONET_EN751221_TIMER
++ select SERIAL_8250
+ select SERIAL_OF_PLATFORM
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_HAS_CPU_MIPS32_R1
--- /dev/null
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -370,6 +370,12 @@ config SPI_DLN2
+ This driver can also be built as a module. If so, the module
+ will be called spi-dln2.
+
++config SPI_AIROHA_EN7523
++ bool "Airoha EN7523 SPI controller support"
++ depends on ARCH_AIROHA
++ help
++ This enables SPI controller support for the Airoha EN7523 SoC.
++
+ config SPI_EP93XX
+ tristate "Cirrus Logic EP93xx SPI controller"
+ depends on ARCH_EP93XX || COMPILE_TEST
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -52,6 +52,7 @@ obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.
+ obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
+ obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o
+ obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
++obj-$(CONFIG_SPI_AIROHA_EN7523) += spi-en7523.o
+ obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
+ obj-$(CONFIG_SPI_FSI) += spi-fsi.o
+ obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
+--- /dev/null
++++ b/drivers/spi/spi-en7523.c
+@@ -0,0 +1,313 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/mod_devicetable.h>
++#include <linux/spi/spi.h>
++
++
++#define ENSPI_READ_IDLE_EN 0x0004
++#define ENSPI_MTX_MODE_TOG 0x0014
++#define ENSPI_RDCTL_FSM 0x0018
++#define ENSPI_MANUAL_EN 0x0020
++#define ENSPI_MANUAL_OPFIFO_EMPTY 0x0024
++#define ENSPI_MANUAL_OPFIFO_WDATA 0x0028
++#define ENSPI_MANUAL_OPFIFO_FULL 0x002C
++#define ENSPI_MANUAL_OPFIFO_WR 0x0030
++#define ENSPI_MANUAL_DFIFO_FULL 0x0034
++#define ENSPI_MANUAL_DFIFO_WDATA 0x0038
++#define ENSPI_MANUAL_DFIFO_EMPTY 0x003C
++#define ENSPI_MANUAL_DFIFO_RD 0x0040
++#define ENSPI_MANUAL_DFIFO_RDATA 0x0044
++#define ENSPI_IER 0x0090
++#define ENSPI_NFI2SPI_EN 0x0130
++
++// TODO not in spi block
++#define ENSPI_CLOCK_DIVIDER ((void __iomem *)0x1fa201c4)
++
++#define OP_CSH 0x00
++#define OP_CSL 0x01
++#define OP_CK 0x02
++#define OP_OUTS 0x08
++#define OP_OUTD 0x09
++#define OP_OUTQ 0x0A
++#define OP_INS 0x0C
++#define OP_INS0 0x0D
++#define OP_IND 0x0E
++#define OP_INQ 0x0F
++#define OP_OS2IS 0x10
++#define OP_OS2ID 0x11
++#define OP_OS2IQ 0x12
++#define OP_OD2IS 0x13
++#define OP_OD2ID 0x14
++#define OP_OD2IQ 0x15
++#define OP_OQ2IS 0x16
++#define OP_OQ2ID 0x17
++#define OP_OQ2IQ 0x18
++#define OP_OSNIS 0x19
++#define OP_ODNID 0x1A
++
++#define MATRIX_MODE_AUTO 1
++#define CONF_MTX_MODE_AUTO 0
++#define MANUALEN_AUTO 0
++#define MATRIX_MODE_MANUAL 0
++#define CONF_MTX_MODE_MANUAL 9
++#define MANUALEN_MANUAL 1
++
++#define _ENSPI_MAX_XFER 0x1ff
++
++#define REG(x) (iobase + x)
++
++
++static void __iomem *iobase;
++
++
++static void opfifo_write(u32 cmd, u32 len)
++{
++ u32 tmp = ((cmd & 0x1f) << 9) | (len & 0x1ff);
++
++ writel(tmp, REG(ENSPI_MANUAL_OPFIFO_WDATA));
++
++ /* Wait for room in OPFIFO */
++ while (readl(REG(ENSPI_MANUAL_OPFIFO_FULL)))
++ ;
++
++ /* Shift command into OPFIFO */
++ writel(1, REG(ENSPI_MANUAL_OPFIFO_WR));
++
++ /* Wait for command to finish */
++ while (!readl(REG(ENSPI_MANUAL_OPFIFO_EMPTY)))
++ ;
++}
++
++static void set_cs(int state)
++{
++ if (state)
++ opfifo_write(OP_CSH, 1);
++ else
++ opfifo_write(OP_CSL, 1);
++}
++
++static void manual_begin_cmd(void)
++{
++ /* Disable read idle state */
++ writel(0, REG(ENSPI_READ_IDLE_EN));
++
++ /* Wait for FSM to reach idle state */
++ while (readl(REG(ENSPI_RDCTL_FSM)))
++ ;
++
++ /* Set SPI core to manual mode */
++ writel(CONF_MTX_MODE_MANUAL, REG(ENSPI_MTX_MODE_TOG));
++ writel(MANUALEN_MANUAL, REG(ENSPI_MANUAL_EN));
++}
++
++static void manual_end_cmd(void)
++{
++ /* Set SPI core to auto mode */
++ writel(CONF_MTX_MODE_AUTO, REG(ENSPI_MTX_MODE_TOG));
++ writel(MANUALEN_AUTO, REG(ENSPI_MANUAL_EN));
++
++ /* Enable read idle state */
++ writel(1, REG(ENSPI_READ_IDLE_EN));
++}
++
++static void dfifo_read(u8 *buf, int len)
++{
++ int i;
++
++ for (i = 0; i < len; i++) {
++ /* Wait for requested data to show up in DFIFO */
++ while (readl(REG(ENSPI_MANUAL_DFIFO_EMPTY)))
++ ;
++ buf[i] = readl(REG(ENSPI_MANUAL_DFIFO_RDATA));
++ /* Queue up next byte */
++ writel(1, REG(ENSPI_MANUAL_DFIFO_RD));
++ }
++}
++
++static void dfifo_write(const u8 *buf, int len)
++{
++ int i;
++
++ for (i = 0; i < len; i++) {
++ /* Wait for room in DFIFO */
++ while (readl(REG(ENSPI_MANUAL_DFIFO_FULL)))
++ ;
++ writel(buf[i], REG(ENSPI_MANUAL_DFIFO_WDATA));
++ }
++}
++
++#if 0
++static void set_spi_clock_speed(int freq_mhz)
++{
++ u32 tmp, val;
++
++ tmp = readl(ENSPI_CLOCK_DIVIDER);
++ tmp &= 0xffff0000;
++ writel(tmp, ENSPI_CLOCK_DIVIDER);
++
++ val = (400 / (freq_mhz * 2));
++ tmp |= (val << 8) | 1;
++ writel(tmp, ENSPI_CLOCK_DIVIDER);
++}
++#endif
++
++static void init_hw(void)
++{
++ /* Disable manual/auto mode clash interrupt */
++ writel(0, REG(ENSPI_IER));
++
++ // TODO via clk framework
++ // set_spi_clock_speed(50);
++
++ /* Disable DMA */
++ writel(0, REG(ENSPI_NFI2SPI_EN));
++}
++
++static int xfer_read(struct spi_transfer *xfer)
++{
++ int opcode;
++ uint8_t *buf = xfer->rx_buf;
++
++ switch (xfer->rx_nbits) {
++ case SPI_NBITS_SINGLE:
++ opcode = OP_INS;
++ break;
++ case SPI_NBITS_DUAL:
++ opcode = OP_IND;
++ break;
++ case SPI_NBITS_QUAD:
++ opcode = OP_INQ;
++ break;
++ }
++
++ opfifo_write(opcode, xfer->len);
++ dfifo_read(buf, xfer->len);
++
++ return xfer->len;
++}
++
++static int xfer_write(struct spi_transfer *xfer, int next_xfer_is_rx)
++{
++ int opcode;
++ const uint8_t *buf = xfer->tx_buf;
++
++ if (next_xfer_is_rx) {
++ /* need to use Ox2Ix opcode to set the core to input afterwards */
++ switch (xfer->tx_nbits) {
++ case SPI_NBITS_SINGLE:
++ opcode = OP_OS2IS;
++ break;
++ case SPI_NBITS_DUAL:
++ opcode = OP_OS2ID;
++ break;
++ case SPI_NBITS_QUAD:
++ opcode = OP_OS2IQ;
++ break;
++ }
++ } else {
++ switch (xfer->tx_nbits) {
++ case SPI_NBITS_SINGLE:
++ opcode = OP_OUTS;
++ break;
++ case SPI_NBITS_DUAL:
++ opcode = OP_OUTD;
++ break;
++ case SPI_NBITS_QUAD:
++ opcode = OP_OUTQ;
++ break;
++ }
++ }
++
++ opfifo_write(opcode, xfer->len);
++ dfifo_write(buf, xfer->len);
++
++ return xfer->len;
++}
++
++size_t max_transfer_size(struct spi_device *spi)
++{
++ return _ENSPI_MAX_XFER;
++}
++
++int transfer_one_message(struct spi_controller *ctrl, struct spi_message *msg)
++{
++ struct spi_transfer *xfer;
++ int next_xfer_is_rx = 0;
++
++ manual_begin_cmd();
++ set_cs(0);
++ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
++ if (xfer->tx_buf) {
++ if (!list_is_last(&xfer->transfer_list, &msg->transfers)
++ && list_next_entry(xfer, transfer_list)->rx_buf != NULL)
++ next_xfer_is_rx = 1;
++ else
++ next_xfer_is_rx = 0;
++ msg->actual_length += xfer_write(xfer, next_xfer_is_rx);
++ } else if (xfer->rx_buf) {
++ msg->actual_length += xfer_read(xfer);
++ }
++ }
++ set_cs(1);
++ manual_end_cmd();
++
++ msg->status = 0;
++ spi_finalize_current_message(ctrl);
++
++ return 0;
++}
++
++static int spi_probe(struct platform_device *pdev)
++{
++ struct spi_controller *ctrl;
++ int err;
++
++ ctrl = devm_spi_alloc_master(&pdev->dev, 0);
++ if (!ctrl) {
++ dev_err(&pdev->dev, "Error allocating SPI controller\n");
++ return -ENOMEM;
++ }
++
++ iobase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
++ if (IS_ERR(iobase)) {
++ dev_err(&pdev->dev, "Could not map SPI register address\n");
++ return PTR_ERR(iobase);
++ }
++
++ init_hw();
++
++ ctrl->dev.of_node = pdev->dev.of_node;
++ ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
++ ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL;
++ ctrl->max_transfer_size = max_transfer_size;
++ ctrl->transfer_one_message = transfer_one_message;
++ err = devm_spi_register_controller(&pdev->dev, ctrl);
++ if (err) {
++ dev_err(&pdev->dev, "Could not register SPI controller\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++static const struct of_device_id spi_of_ids[] = {
++ { .compatible = "airoha,en7523-spi" },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, spi_of_ids);
++
++static struct platform_driver spi_driver = {
++ .probe = spi_probe,
++ .driver = {
++ .name = "airoha-en7523-spi",
++ .of_match_table = spi_of_ids,
++ },
++};
++
++module_platform_driver(spi_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Airoha EN7523 SPI driver");
--- /dev/null
+Subject: Adapt Airoha EN7523 SPI to work with EcoNet EN751221
+
+The SPI driver from the Airoha EN7523 is copied here in its original form,
+so this patch makes three updates to it in order to make it work
+correctly in the EcoNet EN751221 context.
+
+The main change here is that the chip select operation is sent twice.
+This pattern is borrowed from the vendor code and it prevents write
+operations from being lost sporadically on the EN751221.
+
+---
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -372,7 +372,7 @@ config SPI_DLN2
+
+ config SPI_AIROHA_EN7523
+ bool "Airoha EN7523 SPI controller support"
+- depends on ARCH_AIROHA
++ depends on ARCH_AIROHA || ECONET
+ help
+ This enables SPI controller support for the Airoha EN7523 SoC.
+
+--- a/drivers/spi/spi-en7523.c
++++ b/drivers/spi/spi-en7523.c
+@@ -82,10 +82,11 @@ static void opfifo_write(u32 cmd, u32 le
+
+ static void set_cs(int state)
+ {
+- if (state)
+- opfifo_write(OP_CSH, 1);
+- else
+- opfifo_write(OP_CSL, 1);
++ u32 cmd = state ? OP_CSH : OP_CSL;
++
++ /* EN751221 drops writes if we don't send this twice. */
++ opfifo_write(cmd, 1);
++ opfifo_write(cmd, 1);
+ }
+
+ static void manual_begin_cmd(void)
+@@ -226,12 +227,12 @@ static int xfer_write(struct spi_transfe
+ return xfer->len;
+ }
+
+-size_t max_transfer_size(struct spi_device *spi)
++static size_t max_transfer_size(struct spi_device *spi)
+ {
+ return _ENSPI_MAX_XFER;
+ }
+
+-int transfer_one_message(struct spi_controller *ctrl, struct spi_message *msg)
++static int transfer_one_message(struct spi_controller *ctrl, struct spi_message *msg)
+ {
+ struct spi_transfer *xfer;
+ int next_xfer_is_rx = 0;
--- /dev/null
+Subject: Add EcoNet bad block table
+
+The EcoNet BBT/BMT is used for resolving bad blocks in the flash.
+It is implemented in the EcoNet bootloader so you cannot safely
+access flash without using it.
+
+---
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -3,7 +3,7 @@
+ nandcore-objs := core.o bbt.o
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+-obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
++obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o en75_bmt.o
+ ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
+ obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
+ else
+--- a/drivers/mtd/nand/mtk_bmt.h
++++ b/drivers/mtd/nand/mtk_bmt.h
+@@ -77,6 +77,7 @@ extern struct bmt_desc bmtd;
+ extern const struct mtk_bmt_ops mtk_bmt_v2_ops;
+ extern const struct mtk_bmt_ops mtk_bmt_bbt_ops;
+ extern const struct mtk_bmt_ops mtk_bmt_nmbm_ops;
++extern const struct mtk_bmt_ops en75_bmt_ops;
+
+ static inline u32 blk_pg(u16 block)
+ {
+--- a/drivers/mtd/nand/mtk_bmt.c
++++ b/drivers/mtd/nand/mtk_bmt.c
+@@ -422,6 +422,8 @@ int mtk_bmt_attach(struct mtd_info *mtd)
+ bmtd.ops = &mtk_bmt_nmbm_ops;
+ else if (of_property_read_bool(np, "mediatek,bbt"))
+ bmtd.ops = &mtk_bmt_bbt_ops;
++ else if (of_property_read_bool(np, "econet,bmt"))
++ bmtd.ops = &en75_bmt_ops;
+ else
+ return 0;
+
--- /dev/null
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -19,6 +19,7 @@
+ #include <linux/string.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/spi-mem.h>
++#include <linux/mtd/mtk_bmt.h>
+
+ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+ {
+@@ -1525,6 +1526,7 @@ static int spinand_probe(struct spi_mem
+ if (ret)
+ return ret;
+
++ mtk_bmt_attach(mtd);
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_spinand_cleanup;
+@@ -1532,6 +1534,7 @@ static int spinand_probe(struct spi_mem
+ return 0;
+
+ err_spinand_cleanup:
++ mtk_bmt_detach(mtd);
+ spinand_cleanup(spinand);
+
+ return ret;
+@@ -1550,6 +1553,7 @@ static int spinand_remove(struct spi_mem
+ if (ret)
+ return ret;
+
++ mtk_bmt_detach(mtd);
+ spinand_cleanup(spinand);
+
+ return 0;