From 0b439503125f40bbf0e23ad2fa18512e9cdd04ed Mon Sep 17 00:00:00 2001
From: "d.buechler" <d.buechler@adito.de>
Date: Wed, 24 Jul 2019 15:16:35 +0200
Subject: [PATCH] The duplicates entity was extended with additional
 parameters. The ID of a record can now be passed in; it is excluded via an
 additional filter, which makes it possible to remove the record currently
 being searched for from the result list. Further error handling was added.
 Fixed a bug in the validation that checks whether a filter-name combination
 already exists: it also validated while editing and thereby prevented
 saving.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../entityfields/filter_name/onValidation.js  |  5 ++
 .../Duplicates_entity/Duplicates_entity.aod   | 11 +++--
 .../maxreturnvaluecount/valueProcess.js       |  2 +
 .../jditorecordcontainer/contentProcess.js    | 12 +++--
 .../recordidtoignore_param/valueProcess.js    |  3 ++
 .../valuestoscan_param/valueProcess.js        |  3 +-
 process/DuplicateScanner_lib/process.js       | 47 ++++++++++++++-----
 7 files changed, 64 insertions(+), 19 deletions(-)
 create mode 100644 entity/Duplicates_entity/entityfields/maxreturnvaluecount/valueProcess.js
 create mode 100644 entity/Person_entity/entityfields/personduplicates/children/recordidtoignore_param/valueProcess.js

diff --git a/entity/DuplicateScan_entity/entityfields/filter_name/onValidation.js b/entity/DuplicateScan_entity/entityfields/filter_name/onValidation.js
index 597bb8faa57..c239d4068de 100644
--- a/entity/DuplicateScan_entity/entityfields/filter_name/onValidation.js
+++ b/entity/DuplicateScan_entity/entityfields/filter_name/onValidation.js
@@ -1,3 +1,4 @@
+import("system.logging");
 import("system.translate");
 import("system.result");
 import("system.db");
@@ -11,6 +12,7 @@ import("system.vars");
 
 var targetEntity = vars.get("$field.ENTITY_TO_SCAN_NAME");
 var currentFilterName = vars.get("$field.FILTER_NAME");
+var currentId = vars.get("$field.UID");
 var messageText = "The combination of filter name and target entity is already in use";
 
 if(targetEntity != "")
@@ -19,6 +21,9 @@ if(targetEntity != "")
     + " where ENTITY_TO_SCAN_NAME = '" + targetEntity + "'"
     + " and FILTER_NAME = '" + currentFilterName + "'";
 
+    if(currentId != "")
+        query += " and ID != '" + currentId + "'";
+
     var occurrences = parseInt(db.cell(query), 10);
 
     if(occurrences > 0)
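A note on the onValidation fix above: a minimal plain-JavaScript sketch of the query the script now builds. The select part is not visible in the diff, so the table name below is a placeholder, and buildValidationQuery is a hypothetical helper for illustration; only the where clauses and the new ID exclusion come from the patch.

    // Sketch only: DUPLICATESCANNER_TABLE is a placeholder, the diff does not
    // show the actual select. When an existing scanner is edited, $field.UID is
    // set, so the record no longer counts against itself and can be saved.
    function buildValidationQuery(targetEntity, currentFilterName, currentId)
    {
        var query = "select count(*) from DUPLICATESCANNER_TABLE"
            + " where ENTITY_TO_SCAN_NAME = '" + targetEntity + "'"
            + " and FILTER_NAME = '" + currentFilterName + "'";

        // Only on edit: exclude the record's own ID from the count
        if(currentId != "")
            query += " and ID != '" + currentId + "'";

        return query;
    }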
diff --git a/entity/Duplicates_entity/Duplicates_entity.aod b/entity/Duplicates_entity/Duplicates_entity.aod
index 44e31cb3e5a..c0a6d7fe20f 100644
--- a/entity/Duplicates_entity/Duplicates_entity.aod
+++ b/entity/Duplicates_entity/Duplicates_entity.aod
@@ -64,6 +64,11 @@
       <name>maxReturnValueCount</name>
       <valueProcess>%aditoprj%/entity/Duplicates_entity/entityfields/maxreturnvaluecount/valueProcess.js</valueProcess>
     </entityField>
+    <entityParameter>
+      <name>recordIdToIgnore_param</name>
+      <expose v="true" />
+      <mandatory v="true" />
+    </entityParameter>
   </entityFields>
   <recordContainers>
     <jDitoRecordContainer>
@@ -74,6 +79,9 @@
       <jDitoRecordFieldMapping>
         <name>UID.value</name>
       </jDitoRecordFieldMapping>
+      <jDitoRecordFieldMapping>
+        <name>targetEntity.value</name>
+      </jDitoRecordFieldMapping>
       <jDitoRecordFieldMapping>
         <name>VALUE1.value</name>
       </jDitoRecordFieldMapping>
@@ -83,9 +91,6 @@
       <jDitoRecordFieldMapping>
         <name>VALUE3.value</name>
       </jDitoRecordFieldMapping>
-      <jDitoRecordFieldMapping>
-        <name>targetEntity.value</name>
-      </jDitoRecordFieldMapping>
     </recordFieldMappings>
   </jDitoRecordContainer>
 </recordContainers>
diff --git a/entity/Duplicates_entity/entityfields/maxreturnvaluecount/valueProcess.js b/entity/Duplicates_entity/entityfields/maxreturnvaluecount/valueProcess.js
new file mode 100644
index 00000000000..3c533d0fc5b
--- /dev/null
+++ b/entity/Duplicates_entity/entityfields/maxreturnvaluecount/valueProcess.js
@@ -0,0 +1,2 @@
+import("system.result");
+result.string("5");
\ No newline at end of file
diff --git a/entity/Duplicates_entity/recordcontainers/jditorecordcontainer/contentProcess.js b/entity/Duplicates_entity/recordcontainers/jditorecordcontainer/contentProcess.js
index 2dade6b9ba8..f11e641ac15 100644
--- a/entity/Duplicates_entity/recordcontainers/jditorecordcontainer/contentProcess.js
+++ b/entity/Duplicates_entity/recordcontainers/jditorecordcontainer/contentProcess.js
@@ -9,13 +9,14 @@ var values = JSON.parse(vars.get("$param.valuesToScan_param"));
 var resultFields = JSON.parse(vars.get("$param.resultFields_param"));
 var resultFieldsIdFieldName = vars.get("$param.resultFieldsIdFieldName_param");
 var maxRecorValues = parseInt(vars.get("$field.maxReturnValueCount"), 10);
+var recordIdToIgnore = vars.get("$param.recordIdToIgnore_param");
 
 logging.log("filterName -> " + filterName);
 logging.log("targetEntity -> " + targetEntity);
 logging.log("values -> " + values);
 logging.log("resultFields -> " + resultFields);
 
-var duplicates = DuplicateScannerUtils.ScanForDuplicates(filterName, targetEntity, values, resultFields);
+var duplicates = DuplicateScannerUtils.ScanForDuplicates(filterName, targetEntity, values, resultFields, resultFieldsIdFieldName, recordIdToIgnore);
 
 logging.log("duplicates -> " + JSON.stringify(duplicates));
 //[{"FIRSTNAME":"Markus","LASTNAME":"Altinger","PERSONID":"0a611832-9476-481e-bde5-af3c3a98f1b4"},
@@ -24,8 +25,7 @@ var returnRay = [];
 logging.log("duplicates.length -> " + duplicates.length);
 for (i = 0; i < duplicates.length; i++)
 {
-    logging.log("i -> " + i);
-    let newRecord = _compileSingleRecord(duplicates[i], resultFieldsIdFieldName, maxRecorValues);
+    let newRecord = _compileSingleRecord(duplicates[i], resultFieldsIdFieldName, maxRecorValues, targetEntity);
 
     logging.log("newRecord -> " + newRecord);
     returnRay.push(newRecord);
@@ -33,12 +33,13 @@ for (i = 0; i < duplicates.length; i++)
 
 result.object(returnRay);
 
-function _compileSingleRecord(pDuplicate, pIdFieldName, maxRecordValues)
+function _compileSingleRecord(pDuplicate, pIdFieldName, maxRecordValues, pTargetEntity)
 {
     let newRecord = [];
     let recordId = pDuplicate[pIdFieldName];
     newRecord.push(recordId);
+    newRecord.push(pTargetEntity);
 
     let recordCount = 0;
 
@@ -64,7 +65,10 @@ function _compileSingleRecord(pDuplicate, pIdFieldName, maxRecordValues)
 
     logging.log("newRecord.length -> " + newRecord.length);
     logging.log("maxRecordValues -> " + maxRecordValues);
+
+    //If there are fewer elements than required, fill the record with empty strings,
+    //because a record of a recordContainer always has to have the correct length defined by the "recordFieldMappings"
     if(newRecord.length < maxRecordValues)
     {
         let elementsToFill = maxRecordValues - newRecord.length;
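The padding added at the end of _compileSingleRecord deserves a word: a jDito record must contain exactly one entry per recordFieldMapping, so short duplicate results are filled up with empty strings. A compact plain-JavaScript sketch follows; compileSingleRecord is a hypothetical mirror of the patch's function, and the value-collection loop is paraphrased since the diff only shows fragments of it.

    // Sketch of the record shape: [UID, targetEntity, VALUE1..VALUE3], padded
    // so its length always matches maxReturnValueCount (here 5).
    function compileSingleRecord(pDuplicate, pIdFieldName, pMaxRecordValues, pTargetEntity)
    {
        var newRecord = [pDuplicate[pIdFieldName], pTargetEntity];

        for (var field in pDuplicate)        // paraphrased, not verbatim from the patch
        {
            if (field == pIdFieldName)       // the id is already at position 0
                continue;
            if (newRecord.length >= pMaxRecordValues)
                break;                       // never exceed the mapped length
            newRecord.push(pDuplicate[field]);
        }

        while (newRecord.length < pMaxRecordValues)  // the padding the new comment describes
            newRecord.push("");

        return newRecord;
    }

    // compileSingleRecord({PERSONID: "0a61...", FIRSTNAME: "Markus", LASTNAME: "Altinger"},
    //                     "PERSONID", 5, "Person_entity")
    // -> ["0a61...", "Person_entity", "Markus", "Altinger", ""]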
diff --git a/entity/Person_entity/entityfields/personduplicates/children/recordidtoignore_param/valueProcess.js b/entity/Person_entity/entityfields/personduplicates/children/recordidtoignore_param/valueProcess.js
new file mode 100644
index 00000000000..0e4d59044e0
--- /dev/null
+++ b/entity/Person_entity/entityfields/personduplicates/children/recordidtoignore_param/valueProcess.js
@@ -0,0 +1,3 @@
+import("system.vars");
+import("system.result");
+result.string(vars.get("$field.PERSONID"));
\ No newline at end of file
diff --git a/entity/Person_entity/entityfields/personduplicates/children/valuestoscan_param/valueProcess.js b/entity/Person_entity/entityfields/personduplicates/children/valuestoscan_param/valueProcess.js
index d0b8233bee3..1e01a572a98 100644
--- a/entity/Person_entity/entityfields/personduplicates/children/valuestoscan_param/valueProcess.js
+++ b/entity/Person_entity/entityfields/personduplicates/children/valuestoscan_param/valueProcess.js
@@ -14,5 +14,6 @@ import("system.vars");
 var firstname = vars.get("$field.FIRSTNAME");
 var lastname = vars.get("$field.LASTNAME");
 let gender = vars.get("$field.GENDER");
+let recordId = vars.get("$field.PERSONID");
 
-result.object({FIRSTNAME: firstname, LASTNAME: lastname, GENDER: gender});
\ No newline at end of file
+result.object({FIRSTNAME: firstname, LASTNAME: lastname, GENDER: gender, PERSONID: recordId});
\ No newline at end of file
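For clarity, the parameter wiring these two valueProcess scripts establish: PERSONID now travels twice, once inside valuesToScan_param so the scan can resolve the id result field, and once as recordIdToIgnore_param so the open record is excluded from its own duplicate list. An illustrative payload, with example values that are not from a real record:

    // Field names are taken from the patch; the values are made up.
    var valuesToScan = {
        FIRSTNAME: "Markus",
        LASTNAME: "Altinger",
        GENDER: "m",
        PERSONID: "0a611832-9476-481e-bde5-af3c3a98f1b4"
    };
    // what recordidtoignore_param/valueProcess.js effectively returns
    var recordIdToIgnore = valuesToScan.PERSONID;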
diff --git a/process/DuplicateScanner_lib/process.js b/process/DuplicateScanner_lib/process.js
index bae646450a6..0a2af60fd9c 100644
--- a/process/DuplicateScanner_lib/process.js
+++ b/process/DuplicateScanner_lib/process.js
@@ -12,11 +12,12 @@ import("system.entities");
  */
 function DuplicateScannerUtils() {}
 
-DuplicateScannerUtils.ScanForDuplicates = function(pFilterName, pTargetEntity, pFilterValues, pTargetEntityResultFields)
+DuplicateScannerUtils.ScanForDuplicates = function(pFilterName, pTargetEntity, pFilterValues, pTargetEntityResultFields, pRecordIdFieldToIgnore, pRecordIdValueToIgnore)
 {
+    let ignoredRecordFilter = _DuplicateScannerUtils._getIgnoreRecordFilter(pRecordIdFieldToIgnore, pRecordIdValueToIgnore, pTargetEntity);
     let configuredFilters = _DuplicateScannerUtils._loadFilters(pFilterName, pTargetEntity);
-
-    logging.log("configuredFilters filter -> " + configuredFilters);
+
+    configuredFilters = [ignoredRecordFilter].concat(configuredFilters);
 
     let possibleDuplicates = _DuplicateScannerUtils._applyPreFilter(pTargetEntity, configuredFilters, pTargetEntityResultFields, pFilterValues);
 
@@ -40,6 +41,17 @@ var INDEX_FILTER_CONDITION = 0;
 var INDEX_COUNT_CHARS_TO_USE = 1;
 var INDEX_MAX_RESULTS_THRESHOLD = 2;
 
+_DuplicateScannerUtils._getIgnoreRecordFilter = function(pRecordIdFieldToIgnore, pRecordIdValueToIgnore, pTargetEntity)
+{
+    let ignoreFilterJson = JSON.stringify({"entity":pTargetEntity,"filter":{"type":"group","operator":"AND","childs":[{"type":"row","name":pRecordIdFieldToIgnore,"operator":"NOT_EQUAL","value":pRecordIdValueToIgnore,"key":"","contenttype":"TEXT"}]}});
+
+    return [ignoreFilterJson, null, null];
+}
+
+/*
+ * The pre-filter is used to narrow down the records to be searched by the duplicate scan service.
+ * It loads the target entity and uses filters to achieve this.
+ */
 _DuplicateScannerUtils._applyPreFilter = function(pTargetEntity, pFilterCountCharactersToUseRay, pTargetEntityResultFields, pFilterValues)
 {
     var combinedFilter = {};
@@ -52,10 +64,11 @@ _DuplicateScannerUtils._applyPreFilter = function(pTargetEntity, pFilterCountCha
 
         if(filter == null || filter == "")
             continue;
-
+        logging.log("complete filter -> " + filter);
         filter = JSON.parse(filter).filter;
 
-        logging.log("filter -> " + JSON.stringify(filter));
+        logging.log("countCharsOfValueToUse -> " + countCharsOfValueToUse);
+        logging.log("maxResultsThreshold -> " + maxResultsThreshold);
         /*
          * Insert the values into the current filter. Has to be here so that only the new filter
          * and therefore the combinedFilter incrementally gets filled and not always everything multiple times.
         */
@@ -161,13 +174,20 @@ _DuplicateScannerUtils._insertValuesInFilterTemplate = function(pJsonRootNode, p
     let fieldValue = pEntitiyFieldAndValueMap[fieldName];
     pCountCharsOfValueToUse = parseInt(pCountCharsOfValueToUse, 10);
 
-    logging.log("pEntitiyFieldAndValueMap -> " + JSON.stringify(pEntitiyFieldAndValueMap));
-    logging.log("fieldName -> " + fieldName);
-    logging.log("fieldValue -> " + fieldValue);
-    logging.log("fieldValue.length -> " + fieldValue.length);
-    logging.log("pCountCharsOfValueToUse -> " + pCountCharsOfValueToUse);
+    if(fieldValue == null)
+    {
+        logging.log("Duplicate Scan: Requested value for field " + fieldName + " not present in the provided values list");
+        continue;
+    }
 
-    if(_DuplicateScannerUtils._isValueLongerThanCharsToUse(fieldValue.length, pCountCharsOfValueToUse))
+//    logging.log("pEntitiyFieldAndValueMap -> " + JSON.stringify(pEntitiyFieldAndValueMap));
+//    logging.log("fieldName -> " + fieldName);
+//    logging.log("fieldValue -> " + fieldValue);
+//    logging.log("fieldValue.length -> " + fieldValue.length);
+//    logging.log("pCountCharsOfValueToUse -> " + pCountCharsOfValueToUse);
+
+    if(_DuplicateScannerUtils._isNotNullAndANumber(pCountCharsOfValueToUse)
+        && _DuplicateScannerUtils._isValueLongerThanCharsToUse(fieldValue.length, pCountCharsOfValueToUse))
     {
         fieldValue = fieldValue.substring(0, pCountCharsOfValueToUse);
         logging.log("fieldValue geschnitten -> " + fieldValue);
@@ -197,4 +217,9 @@ _DuplicateScannerUtils._isValueLongerThanCharsToUse = function(pValueLength, pCo
     return !isNaN(pCountCharsOfValueToUse)
         && pCountCharsOfValueToUse > 0
         && pValueLength > pCountCharsOfValueToUse;
+}
+
+_DuplicateScannerUtils._isNotNullAndANumber = function(pCountCharsOfValueToUse)
+{
+    return pCountCharsOfValueToUse != null && !isNaN(pCountCharsOfValueToUse);
 }
\ No newline at end of file
-- 
GitLab
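Finally, how the new ignore filter composes with the configured pre-filters in ScanForDuplicates, as a runnable plain-JavaScript sketch. The filter JSON structure and the triple layout mirror the patch; getIgnoreRecordFilter is an illustrative stand-in for the private _getIgnoreRecordFilter helper, and the example arguments are taken from the sample data in the diff.

    // Builds the NOT_EQUAL condition the same way _getIgnoreRecordFilter does
    // and prepends it, so the ignore filter is applied before any configured filter.
    function getIgnoreRecordFilter(pIdField, pIdValue, pTargetEntity)
    {
        var ignoreFilterJson = JSON.stringify({
            entity: pTargetEntity,
            filter: {
                type: "group",
                operator: "AND",
                childs: [{
                    type: "row",
                    name: pIdField,          // e.g. "PERSONID"
                    operator: "NOT_EQUAL",
                    value: pIdValue,         // the UID of the record being viewed
                    key: "",
                    contenttype: "TEXT"
                }]
            }
        });

        // Same [condition, countCharsToUse, maxResultsThreshold] triple layout
        // as the filters loaded by _loadFilters (see the INDEX_* constants).
        return [ignoreFilterJson, null, null];
    }

    var configuredFilters = [];  // stands in for _loadFilters(pFilterName, pTargetEntity)
    configuredFilters = [getIgnoreRecordFilter("PERSONID", "0a611832-9476-481e-bde5-af3c3a98f1b4", "Person_entity")]
        .concat(configuredFilters);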