#include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
 #include <linux/iio/trigger.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
        int scale;
        struct iio_trigger *trig;
        int steps_enabled;
+       bool step_event_en;
        /* Correct time stamp alignment */
        struct {
                __le16 buff[3];
        { }
 };
 
+/*
+ * Step-change event for the IIO_STEPS channel: no threshold or value
+ * attributes, only an on/off switch, hence mask_separate exposes just
+ * IIO_EV_INFO_ENABLE and the direction is "none".
+ */
+static const struct iio_event_spec bma400_step_detect_event = {
+       .type = IIO_EV_TYPE_CHANGE,
+       .dir = IIO_EV_DIR_NONE,
+       .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+};
+
 #define BMA400_ACC_CHANNEL(_index, _axis) { \
        .type = IIO_ACCEL, \
        .modified = 1, \
                .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
                                      BIT(IIO_CHAN_INFO_ENABLE),
                .scan_index = -1, /* No buffer support */
+               .event_spec = &bma400_step_detect_event,
+               .num_event_specs = 1,
        },
        IIO_CHAN_SOFT_TIMESTAMP(4),
 };
        }
 }
 
+/*
+ * Report whether the step-change event is currently enabled.
+ *
+ * Returns the cached enable state (set by bma400_steps_event_enable())
+ * for the IIO_STEPS channel, or -EINVAL for any other channel type —
+ * steps is the only channel that declares an event spec.
+ */
+static int bma400_read_event_config(struct iio_dev *indio_dev,
+                                   const struct iio_chan_spec *chan,
+                                   enum iio_event_type type,
+                                   enum iio_event_direction dir)
+{
+       struct bma400_data *data = iio_priv(indio_dev);
+
+       switch (chan->type) {
+       case IIO_STEPS:
+               return data->step_event_en;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Map (state != 0) or unmap (state == 0) the step interrupt in
+ * BMA400_INT12_MAP_REG and cache the result in data->step_event_en.
+ *
+ * The step counter engine is unconditionally switched on first — the
+ * event cannot fire unless steps are being counted.  NOTE(review): on
+ * disable only the interrupt mapping is cleared; the counter is left
+ * running (bma400_enable_steps is still called with 1) — presumably so
+ * the steps channel keeps counting; confirm against the driver's
+ * steps_enabled handling.
+ *
+ * Caller must hold data->mutex.  Returns 0 or a negative errno from
+ * the regmap/step-enable helpers.
+ */
+static int bma400_steps_event_enable(struct bma400_data *data, int state)
+{
+       int ret;
+
+       ret = bma400_enable_steps(data, 1);
+       if (ret)
+               return ret;
+
+       ret = regmap_update_bits(data->regmap, BMA400_INT12_MAP_REG,
+                                BMA400_STEP_INT_MSK,
+                                FIELD_PREP(BMA400_STEP_INT_MSK,
+                                           state));
+       if (ret)
+               return ret;
+       data->step_event_en = state;
+       return 0;
+}
+
+/*
+ * Enable/disable the step-change event (IIO_EV_INFO_ENABLE attribute).
+ *
+ * Only the IIO_STEPS channel supports events; any other channel type
+ * yields -EINVAL.  data->mutex is taken around the register update so
+ * the interrupt mapping and the cached enable state stay consistent.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int bma400_write_event_config(struct iio_dev *indio_dev,
+                                    const struct iio_chan_spec *chan,
+                                    enum iio_event_type type,
+                                    enum iio_event_direction dir, int state)
+{
+       struct bma400_data *data = iio_priv(indio_dev);
+       int ret;
+
+       switch (chan->type) {
+       case IIO_STEPS:
+               mutex_lock(&data->mutex);
+               ret = bma400_steps_event_enable(data, state);
+               mutex_unlock(&data->mutex);
+               return ret;
+       default:
+               return -EINVAL;
+       }
+}
+
 static int bma400_data_rdy_trigger_set_state(struct iio_trigger *trig,
                                             bool state)
 {
        .read_avail        = bma400_read_avail,
        .write_raw         = bma400_write_raw,
        .write_raw_get_fmt = bma400_write_raw_get_fmt,
+       .read_event_config = bma400_read_event_config,
+       .write_event_config = bma400_write_event_config,
 };
 
 static const struct iio_trigger_ops bma400_trigger_ops = {
 {
        struct iio_dev *indio_dev = private;
        struct bma400_data *data = iio_priv(indio_dev);
+       s64 timestamp = iio_get_time_ns(indio_dev);
        int ret;
 
        /* Lock to protect the data->status */
        if (ret || !data->status)
                goto unlock_err;
 
+       if (FIELD_GET(BMA400_STEP_STAT_MASK, le16_to_cpu(data->status))) {
+               iio_push_event(indio_dev,
+                              IIO_MOD_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
+                                                 IIO_EV_TYPE_CHANGE,
+                                                 IIO_EV_DIR_NONE),
+                              timestamp);
+       }
+
        if (FIELD_GET(BMA400_INT_DRDY_MSK, le16_to_cpu(data->status))) {
                mutex_unlock(&data->mutex);
                iio_trigger_poll_chained(data->trig);
                return IRQ_HANDLED;
        }
 
+       mutex_unlock(&data->mutex);
+       return IRQ_HANDLED;
+
 unlock_err:
        mutex_unlock(&data->mutex);
        return IRQ_NONE;