User data is compatible with the standard AWS EKS Terraform module; the only recommendation is to use a custom AMI. To use the instance store from pods you also need to install the local static provisioner: https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner
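
The local static provisioner exposes each discovered mount as a local PersistentVolume bound to a StorageClass. A minimal sketch of such a class using the Terraform kubernetes provider is shown below; the resource and class names are assumptions, and the provisioner's own configuration (deployed from the repository above) must point its discovery directory at the mount created by the user data further down (for example by bind-mounting /local1 underneath it).

    resource "kubernetes_storage_class" "local_storage" {
      metadata {
        # Assumed class name; it must match the class configured for the provisioner.
        name = "local-storage"
      }

      # The local static provisioner creates the PersistentVolumes itself, so the
      # class uses the no-provisioner placeholder and delays binding until a pod
      # is scheduled, which keeps each PV pinned to its node.
      storage_provisioner = "kubernetes.io/no-provisioner"
      volume_binding_mode = "WaitForFirstConsumer"
    }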

Terraform example (a node group entry for the module's eks_managed_node_groups map):

    eks-dev-instance-store = {
      instance_types = ["r6id.large"]

      min_size     = 1
      max_size     = 3
      desired_size = 1

      block_device_mappings = {
        # Root volume
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size           = 24
            volume_type           = "gp3"
            iops                  = 3000
            encrypted             = false
            delete_on_termination = true
          }
        }
      }

      ami_id = data.aws_ami.ubuntu.image_id
      # The virtual device name (ephemeralN). Instance store volumes are numbered
      # starting from 0; an instance type with two available instance store volumes
      # can specify mappings for ephemeral0 and ephemeral1. The number of available
      # instance store volumes depends on the instance type. After you connect to
      # the instance you must mount the volumes - here, user data mounts them
      # automatically during instance creation.
      #
      # NVMe instance store volumes are automatically enumerated and assigned a
      # device name, so including them in the block device mapping has no effect.
      # (post_bootstrap_user_data is also available for commands that should run
      # after the bootstrap script.)

      enable_bootstrap_user_data = true
      pre_bootstrap_user_data = <<-EOT
      echo "Running a custom user data script"
      set -ex
      apt-get update
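      # nvme-cli is used to identify instance-store devices, mdadm to build a RAID-0 array, xfsprogs for mkfs.xfs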
      apt-get install -y nvme-cli mdadm xfsprogs
      # Fetch the list of NVMe devices
      DEVICES=$(lsblk -d -o NAME | grep nvme)
      DISK_ARRAY=()
      for DEV in $DEVICES
      do
        # Exclude the root disk, /dev/nvme0n1, from the list of devices
        if [[ $${DEV} != "nvme0n1" ]]; then
          NVME_INFO=$(nvme id-ctrl --raw-binary "/dev/$${DEV}" | cut -c3073-3104 | tr -s ' ' | sed 's/ $//g')
          # Check if the device is Amazon EC2 NVMe Instance Storage
          if [[ $${NVME_INFO} == *"ephemeral"* ]]; then
            DISK_ARRAY+=("/dev/$${DEV}")
          fi
        fi
      done
      DISK_COUNT=$${#DISK_ARRAY[@]}
      if [ $${DISK_COUNT} -eq 0 ]; then
        echo "No NVMe SSD disks available. No further action needed."
      else
        if [ $${DISK_COUNT} -eq 1 ]; then
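          # A single instance store disk: format it directly with XFS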
          TARGET_DEV=$${DISK_ARRAY[0]}
          mkfs.xfs $${TARGET_DEV}
        else
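          # Multiple disks: stripe them into a single RAID-0 array (/dev/md0) so they appear as one volume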
          mdadm --create --verbose /dev/md0 --level=0 --raid-devices=$${DISK_COUNT} $${DISK_ARRAY[@]}
          mkfs.xfs /dev/md0
          TARGET_DEV=/dev/md0
        fi
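        # Mount the filesystem at /local1 and persist it across reboots via /etc/fstab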
        mkdir -p /local1
        echo $${TARGET_DEV} /local1 xfs defaults,noatime 1 2 >> /etc/fstab
        mount -a
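        # Hand ownership to the numeric uid/gid of the workload that will use the volume (999:1000 here)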
        /usr/bin/chown -hR +999:+1000 /local1
      fi
      EOT

      labels = {
        group = "instance-store"
      }

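      # Pods must tolerate this taint (and select the label above) to land on these nodes; see the pod sketch after this example.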
      taints = {
        dedicated = {
          key    = "group"
          value  = "instance-store"
          effect = "NO_SCHEDULE"
        }
      }
      update_config = {
        max_unavailable_percentage = 25
      }

      tags = {
        ExtraTag                                  = "instance-store"
        "k8s.io/cluster-autoscaler/enabled"       = "true"
        "k8s.io/cluster-autoscaler/${local.name}" = "owned"
      }
    }
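
Because the node group is both labeled and tainted, only workloads that select the label and tolerate the taint are scheduled onto the instance-store nodes. A minimal sketch using the Terraform kubernetes provider follows; the pod name and image are placeholders, and in practice the same toleration and nodeSelector go on the Deployment or StatefulSet that claims the local volumes.

    resource "kubernetes_pod" "instance_store_consumer" {
      metadata {
        name = "instance-store-consumer" # placeholder name
      }

      spec {
        # Match the label applied to the node group above.
        node_selector = {
          group = "instance-store"
        }

        # Tolerate the group=instance-store:NoSchedule taint applied above.
        toleration {
          key      = "group"
          operator = "Equal"
          value    = "instance-store"
          effect   = "NoSchedule"
        }

        container {
          name    = "app"
          image   = "busybox:1.36" # placeholder image
          command = ["sleep", "infinity"]
        }
      }
    }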