Duplicate Document Inserted for Elastic Sink Connector

I am using the JdbcSourceConnector to publish data from an Oracle database. The same table rows are published multiple times with updated values, but the Elasticsearch sink connector cannot resolve the primary key of the published data. As a result, it inserts multiple documents for the same Oracle row instead of replacing the existing one.
My question is: how can I change my configuration so that, even if the same Oracle row is published multiple times, only one document is inserted or updated based on the Oracle primary key?

Here is my source connector configuration:

{
  "name": "source-connector-txn-oracle",
  "config": {
    "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
    "connection.url": "jdbc:oracle:thin:oracle-db",
    "connection.user": "username",
    "connection.password": "password",
    "connection.attempts": "10",
    "connection.backoff.ms": "2160000",
    "table.types": "TABLE,VIEW",
    "table.whitelist": "Table_Name",
    "mode": "timestamp+incrementing",
    "timestamp.column.name": "UPDATED,CREATED",
    "incrementing.column.name": "ID",
    "validate.non.null": "false",
    "poll.interval.ms": "2000",
    "batch.max.rows": "100",
    "table.poll.interval.ms": "2000",
    "topic.prefix": "ELK_",
    "timestamp.delay.interval.ms": "0",
    "tasks.max": "10",
    "topic.creation.enable": "true",
    "topic.creation.default.replication.factor": "2",
    "topic.creation.default.partitions": "10",
    "config.action.reload": "restart",
    "errors.retry.timeout": "0",
    "errors.retry.delay.max.ms": "60000",
    "errors.tolerance": "none",
    "errors.log.enable": "true",
    "errors.log.include.messages": "true",
    "db.timezone": "Asia/Dhaka"
  }
}

Here is my Elasticsearch sink connector configuration:
{
  "name": "sink-connector-elastic-search",
  "config": {
    "connector.class": "io.confluent.connect.elasticsearch.ElasticsearchSinkConnector",
    "connection.url": "connectionUrl",
    "connection.username": "username",
    "connection.password": "password",
    "batch.size": "1000",
    "write.method": "UPSERT",
    "pk.mode": "record_value",
    "pk.fields": "ID",
    "type.name": "_doc",
    "key.ignore": "true",
    "auto.create": "false",
    "auto.evolve": "false",
    "max.retries": "10",
    "retry.backoff.ms": "2160000",
    "tasks.max": "10",
    "topics": "TopicName",
    "index": "indexName",
    "errors.retry.timeout": "0",
    "errors.retry.delay.max.ms": "60000",
    "errors.tolerance": "none",
    "errors.log.enable": "false",
    "errors.log.include.messages": "false",
    "errors.deadletterqueue.topic.name": "",
    "errors.deadletterqueue.topic.replication.factor": "2",
    "errors.deadletterqueue.context.headers.enable": "false",
    "quote.sql.identifiers": "NEVER",
    "document.id.strategy": "io.confluent.connect.elasticsearch.ElasticsearchSinkConnector$RecordFieldStrategy",
    "document.id.field": "ID"
  }
}