path: root/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write
Diffstat (limited to 'infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write')
-rw-r--r--  infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/AbstractGenericWriter.java | 137
-rw-r--r--  infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/NoopWriterRegistry.java | 40
-rw-r--r--  infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/TransactionWriteContext.java | 121
-rw-r--r--  infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistry.java | 315
-rw-r--r--  infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistryBuilder.java | 71
-rw-r--r--  infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/SubtreeWriter.java | 85
6 files changed, 769 insertions, 0 deletions
diff --git a/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/AbstractGenericWriter.java b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/AbstractGenericWriter.java
new file mode 100644
index 000000000..ff2174ed2
--- /dev/null
+++ b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/AbstractGenericWriter.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package io.fd.honeycomb.translate.util.write;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import io.fd.honeycomb.translate.write.WriteContext;
+import io.fd.honeycomb.translate.util.RWUtils;
+import io.fd.honeycomb.translate.write.WriteFailedException;
+import io.fd.honeycomb.translate.write.Writer;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractGenericWriter<D extends DataObject> implements Writer<D> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractGenericWriter.class);
+
+ private final InstanceIdentifier<D> instanceIdentifier;
+
+ protected AbstractGenericWriter(final InstanceIdentifier<D> type) {
+ this.instanceIdentifier = RWUtils.makeIidWildcarded(type);
+ }
+
+ protected void writeCurrent(final InstanceIdentifier<D> id, final D data, final WriteContext ctx)
+ throws WriteFailedException {
+ LOG.debug("{}: Writing current: {} data: {}", this, id, data);
+ writeCurrentAttributes(id, data, ctx);
+ LOG.debug("{}: Current node written successfully", this);
+ }
+
+ protected void updateCurrent(final InstanceIdentifier<D> id, final D dataBefore, final D dataAfter,
+ final WriteContext ctx) throws WriteFailedException {
+ LOG.debug("{}: Updating current: {} dataBefore: {}, datAfter: {}", this, id, dataBefore, dataAfter);
+
+ if (dataBefore.equals(dataAfter)) {
+ LOG.debug("{}: Skipping current(no update): {}", this, id);
+ // No change, ignore
+ return;
+ }
+ updateCurrentAttributes(id, dataBefore, dataAfter, ctx);
+ LOG.debug("{}: Current node updated successfully", this);
+ }
+
+ protected void deleteCurrent(final InstanceIdentifier<D> id, final D dataBefore, final WriteContext ctx)
+ throws WriteFailedException {
+ LOG.debug("{}: Deleting current: {} dataBefore: {}", this, id, dataBefore);
+ deleteCurrentAttributes(id, dataBefore, ctx);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void update(@Nonnull final InstanceIdentifier<? extends DataObject> id,
+ @Nullable final DataObject dataBefore,
+ @Nullable final DataObject dataAfter,
+ @Nonnull final WriteContext ctx) throws WriteFailedException {
+ LOG.debug("{}: Updating : {}", this, id);
+ LOG.trace("{}: Updating : {}, from: {} to: {}", this, id, dataBefore, dataAfter);
+
+ checkArgument(idPointsToCurrent(id), "Cannot handle data: %s. Only: %s can be handled by writer: %s",
+ id, getManagedDataObjectType(), this);
+
+ if (isWrite(dataBefore, dataAfter)) {
+ writeCurrent((InstanceIdentifier<D>) id, castToManaged(dataAfter), ctx);
+ } else if (isDelete(dataBefore, dataAfter)) {
+ deleteCurrent((InstanceIdentifier<D>) id, castToManaged(dataBefore), ctx);
+ } else {
+ checkArgument(dataBefore != null && dataAfter != null, "No data to process");
+ updateCurrent((InstanceIdentifier<D>) id, castToManaged(dataBefore), castToManaged(dataAfter), ctx);
+ }
+ }
+
+ private void checkDataType(@Nonnull final DataObject dataAfter) {
+ checkArgument(getManagedDataObjectType().getTargetType().isAssignableFrom(dataAfter.getClass()));
+ }
+
+ private D castToManaged(final DataObject data) {
+ checkDataType(data);
+ return getManagedDataObjectType().getTargetType().cast(data);
+ }
+
+ private static boolean isWrite(final DataObject dataBefore,
+ final DataObject dataAfter) {
+ return dataBefore == null && dataAfter != null;
+ }
+
+ private static boolean isDelete(final DataObject dataBefore,
+ final DataObject dataAfter) {
+ return dataAfter == null && dataBefore != null;
+ }
+
+ private boolean idPointsToCurrent(final @Nonnull InstanceIdentifier<? extends DataObject> id) {
+ return id.getTargetType().equals(getManagedDataObjectType().getTargetType());
+ }
+
+ protected abstract void writeCurrentAttributes(@Nonnull final InstanceIdentifier<D> id,
+ @Nonnull final D data,
+ @Nonnull final WriteContext ctx) throws WriteFailedException;
+
+ protected abstract void deleteCurrentAttributes(@Nonnull final InstanceIdentifier<D> id,
+ @Nonnull final D dataBefore,
+ @Nonnull final WriteContext ctx) throws WriteFailedException;
+
+ protected abstract void updateCurrentAttributes(@Nonnull final InstanceIdentifier<D> id,
+ @Nonnull final D dataBefore,
+ @Nonnull final D dataAfter,
+ @Nonnull final WriteContext ctx) throws WriteFailedException;
+
+ @Nonnull
+ @Override
+ public InstanceIdentifier<D> getManagedDataObjectType() {
+ return instanceIdentifier;
+ }
+
+
+ @Override
+ public String toString() {
+ return String.format("Writer[%s]", getManagedDataObjectType().getTargetType().getSimpleName());
+ }
+}
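
Below is a minimal sketch of how a concrete writer might extend AbstractGenericWriter. The Interfaces/Interface binding classes and the InterfaceWriter name are illustrative assumptions, not part of this patch; only the three *CurrentAttributes hooks need to be implemented, while write/update/delete dispatching is inherited from the base class.

import io.fd.honeycomb.translate.write.WriteContext;
import io.fd.honeycomb.translate.write.WriteFailedException;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

public final class InterfaceWriter extends AbstractGenericWriter<Interface> {

    public InterfaceWriter() {
        // The super constructor wildcards this identifier via RWUtils.makeIidWildcarded
        super(InstanceIdentifier.create(Interfaces.class).child(Interface.class));
    }

    @Override
    protected void writeCurrentAttributes(final InstanceIdentifier<Interface> id, final Interface data,
                                          final WriteContext ctx) throws WriteFailedException {
        // create the interface on the underlying device/driver
    }

    @Override
    protected void updateCurrentAttributes(final InstanceIdentifier<Interface> id, final Interface dataBefore,
                                           final Interface dataAfter, final WriteContext ctx)
            throws WriteFailedException {
        // apply the difference between dataBefore and dataAfter
    }

    @Override
    protected void deleteCurrentAttributes(final InstanceIdentifier<Interface> id, final Interface dataBefore,
                                           final WriteContext ctx) throws WriteFailedException {
        // remove the interface from the underlying device/driver
    }
}
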
diff --git a/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/NoopWriterRegistry.java b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/NoopWriterRegistry.java
new file mode 100644
index 000000000..8f15c6f8d
--- /dev/null
+++ b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/NoopWriterRegistry.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package io.fd.honeycomb.translate.util.write;
+
+import io.fd.honeycomb.translate.TranslationException;
+import io.fd.honeycomb.translate.write.WriteContext;
+import io.fd.honeycomb.translate.write.registry.WriterRegistry;
+import javax.annotation.Nonnull;
+
+/**
+ * Empty registry that does not perform any changes. Can be used in the data layer to disable passing data to the
+ * translation layer.
+ */
+public class NoopWriterRegistry implements WriterRegistry, AutoCloseable {
+
+ @Override
+ public void update(@Nonnull final DataObjectUpdates updates,
+ @Nonnull final WriteContext ctx) throws TranslationException {
+ // NOOP
+ }
+
+ @Override
+ public void close() throws Exception {
+ // NOOP
+ }
+}
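
A minimal usage sketch: wherever the wiring expects a WriterRegistry, the no-op implementation can be dropped in to silence the translation layer. The "updates" and "ctx" variables are assumed to exist in the surrounding code.

final WriterRegistry registry = new NoopWriterRegistry();
registry.update(updates, ctx); // accepted and silently ignored
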
diff --git a/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/TransactionWriteContext.java b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/TransactionWriteContext.java
new file mode 100644
index 000000000..51f999294
--- /dev/null
+++ b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/TransactionWriteContext.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package io.fd.honeycomb.translate.util.write;
+
+import static com.google.common.base.Preconditions.checkState;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import io.fd.honeycomb.translate.MappingContext;
+import io.fd.honeycomb.translate.ModificationCache;
+import io.fd.honeycomb.translate.write.WriteContext;
+import java.util.Map;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Transaction-based WriteContext.
+ */
+public final class TransactionWriteContext implements WriteContext {
+
+ private final DOMDataReadOnlyTransaction beforeTx;
+ private final DOMDataReadOnlyTransaction afterTx;
+ private final ModificationCache ctx;
+ private final BindingNormalizedNodeSerializer serializer;
+ private final MappingContext mappingContext;
+
+ public TransactionWriteContext(final BindingNormalizedNodeSerializer serializer,
+ final DOMDataReadOnlyTransaction beforeTx,
+ final DOMDataReadOnlyTransaction afterTx,
+ final MappingContext mappingContext) {
+ this.serializer = serializer;
+ // TODO do we have a BA transaction adapter ? If so, use it here and don't pass serializer
+ this.beforeTx = beforeTx;
+ this.afterTx = afterTx;
+ this.mappingContext = mappingContext;
+ this.ctx = new ModificationCache();
+ }
+
+ // TODO make this asynchronous
+
+ @Override
+ public <T extends DataObject> Optional<T> readBefore(@Nonnull final InstanceIdentifier<T> currentId) {
+ return read(currentId, beforeTx);
+ }
+
+ @Override
+ public <T extends DataObject> Optional<T> readAfter(@Nonnull final InstanceIdentifier<T> currentId) {
+ return read(currentId, afterTx);
+ }
+
+
+ private <T extends DataObject> Optional<T> read(final InstanceIdentifier<T> currentId,
+ final DOMDataReadOnlyTransaction tx) {
+ final YangInstanceIdentifier path = serializer.toYangInstanceIdentifier(currentId);
+
+ final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
+ tx.read(LogicalDatastoreType.CONFIGURATION, path);
+
+ try {
+ // TODO once the APIs are asynchronous use just Futures.transform
+ final Optional<NormalizedNode<?, ?>> optional = read.checkedGet();
+
+ if (!optional.isPresent()) {
+ return Optional.absent();
+ }
+
+ final NormalizedNode<?, ?> data = optional.get();
+ final Map.Entry<InstanceIdentifier<?>, DataObject> entry = serializer.fromNormalizedNode(path, data);
+
+ final Class<T> targetType = currentId.getTargetType();
+ checkState(targetType.isAssignableFrom(entry.getValue().getClass()),
+ "Unexpected data object type, should be: %s, but was: %s", targetType, entry.getValue().getClass());
+ return Optional.of(targetType.cast(entry.getValue()));
+ } catch (ReadFailedException e) {
+ throw new IllegalStateException("Unable to perform read", e);
+ }
+ }
+
+ @Nonnull
+ @Override
+ public ModificationCache getModificationCache() {
+ return ctx;
+ }
+
+ @Nonnull
+ @Override
+ public MappingContext getMappingContext() {
+ return mappingContext;
+ }
+
+ /**
+ * Closes the modification cache and both read-only transactions.
+ */
+ @Override
+ public void close() {
+ ctx.close();
+ beforeTx.close();
+ afterTx.close();
+ }
+}
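
A usage sketch for the context above. "serializer" and "mappingContext" are assumed to come from the wiring code, "beforeTx"/"afterTx" are read-only snapshots taken before and after the change being processed, and "Interfaces" is a placeholder binding class.

final TransactionWriteContext writeContext =
        new TransactionWriteContext(serializer, beforeTx, afterTx, mappingContext);
try {
    // Writers can compare pre- and post-modification configuration of any node
    final Optional<Interfaces> before = writeContext.readBefore(InstanceIdentifier.create(Interfaces.class));
    final Optional<Interfaces> after = writeContext.readAfter(InstanceIdentifier.create(Interfaces.class));
    // ... hand writeContext to the writers / WriterRegistry.update(...)
} finally {
    writeContext.close(); // also closes both transactions and the modification cache
}
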
diff --git a/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistry.java b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistry.java
new file mode 100644
index 000000000..df8ec107b
--- /dev/null
+++ b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistry.java
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package io.fd.honeycomb.translate.util.write.registry;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import io.fd.honeycomb.translate.TranslationException;
+import io.fd.honeycomb.translate.write.WriteContext;
+import io.fd.honeycomb.translate.util.RWUtils;
+import io.fd.honeycomb.translate.write.DataObjectUpdate;
+import io.fd.honeycomb.translate.write.WriteFailedException;
+import io.fd.honeycomb.translate.write.Writer;
+import io.fd.honeycomb.translate.write.registry.WriterRegistry;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import javax.annotation.concurrent.ThreadSafe;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Flat writer registry, delegating updates to writers in the order writers were submitted.
+ */
+@ThreadSafe
+final class FlatWriterRegistry implements WriterRegistry {
+
+ private static final Logger LOG = LoggerFactory.getLogger(FlatWriterRegistry.class);
+
+ // All types handled by writers directly or as children
+ private final ImmutableSet<InstanceIdentifier<?>> handledTypes;
+
+ private final Set<InstanceIdentifier<?>> writersOrderReversed;
+ private final Set<InstanceIdentifier<?>> writersOrder;
+ private final Map<InstanceIdentifier<?>, Writer<?>> writers;
+
+ /**
+ * Create flat registry instance.
+ *
+ * @param writers immutable, ordered map of writers used to process updates. The order of the writers must be
+ * the order in which create and update operations are handled; deletes are handled in reverse
+ * order. All deletes are processed before any updates.
+ */
+ FlatWriterRegistry(@Nonnull final ImmutableMap<InstanceIdentifier<?>, Writer<?>> writers) {
+ this.writers = writers;
+ this.writersOrderReversed = Sets.newLinkedHashSet(Lists.reverse(Lists.newArrayList(writers.keySet())));
+ this.writersOrder = writers.keySet();
+ this.handledTypes = getAllHandledTypes(writers);
+ }
+
+ private static ImmutableSet<InstanceIdentifier<?>> getAllHandledTypes(
+ @Nonnull final ImmutableMap<InstanceIdentifier<?>, Writer<?>> writers) {
+ final ImmutableSet.Builder<InstanceIdentifier<?>> handledTypesBuilder = ImmutableSet.builder();
+ for (Map.Entry<InstanceIdentifier<?>, Writer<?>> writerEntry : writers.entrySet()) {
+ final InstanceIdentifier<?> writerType = writerEntry.getKey();
+ final Writer<?> writer = writerEntry.getValue();
+ handledTypesBuilder.add(writerType);
+ if (writer instanceof SubtreeWriter) {
+ handledTypesBuilder.addAll(((SubtreeWriter<?>) writer).getHandledChildTypes());
+ }
+ }
+ return handledTypesBuilder.build();
+ }
+
+ @Override
+ public void update(@Nonnull final DataObjectUpdates updates,
+ @Nonnull final WriteContext ctx) throws TranslationException {
+ if (updates.isEmpty()) {
+ return;
+ }
+
+ // Optimization
+ if (updates.containsOnlySingleType()) {
+ // First process deletes
+ singleUpdate(updates.getDeletes(), ctx);
+ // Then process updates
+ singleUpdate(updates.getUpdates(), ctx);
+ } else {
+ // First process deletes
+ bulkUpdate(updates.getDeletes(), ctx, true, writersOrderReversed);
+ // Next are updates
+ bulkUpdate(updates.getUpdates(), ctx, true, writersOrder);
+ }
+
+ LOG.debug("Update successful for types: {}", updates.getTypeIntersection());
+ LOG.trace("Update successful for: {}", updates);
+ }
+
+ private void singleUpdate(@Nonnull final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
+ @Nonnull final WriteContext ctx) throws WriteFailedException {
+ if (updates.isEmpty()) {
+ return;
+ }
+
+ final InstanceIdentifier<?> singleType = updates.keySet().iterator().next();
+ LOG.debug("Performing single type update for: {}", singleType);
+ Collection<? extends DataObjectUpdate> singleTypeUpdates = updates.get(singleType);
+ Writer<?> writer = getWriter(singleType);
+
+ if (writer == null) {
+ // This node must be handled by a subtree writer, find it and call it or else fail
+ checkArgument(handledTypes.contains(singleType), "Unable to process update. Missing writers for: %s",
+ singleType);
+ writer = getSubtreeWriterResponsible(singleType);
+ singleTypeUpdates = getParentDataObjectUpdate(ctx, updates, writer);
+ }
+
+ LOG.trace("Performing single type update with writer: {}", writer);
+ for (DataObjectUpdate singleUpdate : singleTypeUpdates) {
+ writer.update(singleUpdate.getId(), singleUpdate.getDataBefore(), singleUpdate.getDataAfter(), ctx);
+ }
+ }
+
+ private Writer<?> getSubtreeWriterResponsible(final InstanceIdentifier<?> singleType) {
+ // This is slow (minor TODO-perf)
+ final Writer<?> writer = writers.values().stream()
+ .filter(w -> w instanceof SubtreeWriter)
+ .filter(w -> ((SubtreeWriter<?>) w).getHandledChildTypes().contains(singleType))
+ .findFirst()
+ .get();
+ return writer;
+ }
+
+ private Collection<DataObjectUpdate> getParentDataObjectUpdate(final WriteContext ctx,
+ final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
+ final Writer<?> writer) {
+ // Read data for the subtree writer's root; a keyed ID is needed first and it can be cut from the updates
+ InstanceIdentifier<?> firstAffectedChildId = ((SubtreeWriter<?>) writer).getHandledChildTypes().stream()
+ .filter(updates::containsKey)
+ .map(unkeyedId -> updates.get(unkeyedId))
+ .flatMap(doUpdates -> doUpdates.stream())
+ .map(DataObjectUpdate::getId)
+ .findFirst()
+ .get();
+
+ final InstanceIdentifier<?> parentKeyedId =
+ RWUtils.cutId(firstAffectedChildId, writer.getManagedDataObjectType());
+
+ final Optional<? extends DataObject> parentBefore = ctx.readBefore(parentKeyedId);
+ final Optional<? extends DataObject> parentAfter = ctx.readAfter(parentKeyedId);
+ return Collections.singleton(
+ DataObjectUpdate.create(parentKeyedId, parentBefore.orNull(), parentAfter.orNull()));
+ }
+
+ private void bulkUpdate(@Nonnull final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
+ @Nonnull final WriteContext ctx,
+ final boolean attemptRevert,
+ @Nonnull final Set<InstanceIdentifier<?>> writersOrder) throws BulkUpdateException {
+ if (updates.isEmpty()) {
+ return;
+ }
+
+ LOG.debug("Performing bulk update with revert attempt: {} for: {}", attemptRevert, updates.keySet());
+
+ // Check that all updates can be handled
+ checkAllTypesCanBeHandled(updates);
+
+ // Capture all changes successfully processed in case revert is needed
+ final Set<InstanceIdentifier<?>> processedNodes = new HashSet<>();
+
+ // Iterate over all writers and call update if there are any related updates
+ for (InstanceIdentifier<?> writerType : writersOrder) {
+ Collection<? extends DataObjectUpdate> writersData = updates.get(writerType);
+ final Writer<?> writer = getWriter(writerType);
+
+ if (writersData.isEmpty()) {
+ // If there is no data for the current writer, but it is a SubtreeWriter and there are updates to
+ // its children, still invoke it with its root data
+ if (writer instanceof SubtreeWriter<?> && isAffected(((SubtreeWriter<?>) writer), updates)) {
+ // Provide parent data for SubtreeWriter for further processing
+ writersData = getParentDataObjectUpdate(ctx, updates, writer);
+ } else {
+ // Skipping unaffected writer
+ // An alternative would be to sort the modifications according to the writer order
+ continue;
+ }
+ }
+
+ LOG.debug("Performing update for: {}", writerType);
+ LOG.trace("Performing update with writer: {}", writer);
+
+ for (DataObjectUpdate singleUpdate : writersData) {
+ try {
+ writer.update(singleUpdate.getId(), singleUpdate.getDataBefore(), singleUpdate.getDataAfter(), ctx);
+ processedNodes.add(singleUpdate.getId());
+ LOG.trace("Update successful for type: {}", writerType);
+ LOG.debug("Update successful for: {}", singleUpdate);
+ } catch (Exception e) {
+ LOG.error("Error while processing data change of: {} (updates={})", writerType, writersData, e);
+
+ final Reverter reverter = attemptRevert
+ ? new ReverterImpl(processedNodes, updates, writersOrder, ctx)
+ : () -> {}; // NOOP reverter
+
+ // Find out which changes were left unprocessed
+ final Set<InstanceIdentifier<?>> unprocessedChanges = updates.values().stream()
+ .map(DataObjectUpdate::getId)
+ .filter(id -> !processedNodes.contains(id))
+ .collect(Collectors.toSet());
+ throw new BulkUpdateException(unprocessedChanges, reverter, e);
+ }
+ }
+ }
+ }
+
+ private void checkAllTypesCanBeHandled(
+ @Nonnull final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates) {
+ if (!handledTypes.containsAll(updates.keySet())) {
+ final Sets.SetView<InstanceIdentifier<?>> missingWriters = Sets.difference(updates.keySet(), handledTypes);
+ LOG.warn("Unable to process update. Missing writers for: {}", missingWriters);
+ throw new IllegalArgumentException("Unable to process update. Missing writers for: " + missingWriters);
+ }
+ }
+
+ /**
+ * Check whether {@link SubtreeWriter} is affected by the updates.
+ *
+ * @return true if there are any updates to SubtreeWriter's child nodes (those marked by SubtreeWriter
+ * as being taken care of)
+ */
+ private static boolean isAffected(final SubtreeWriter<?> writer,
+ final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates) {
+ return !Sets.intersection(writer.getHandledChildTypes(), updates.keySet()).isEmpty();
+ }
+
+ @Nullable
+ private Writer<?> getWriter(@Nonnull final InstanceIdentifier<?> singleType) {
+ return writers.get(singleType);
+ }
+
+ // FIXME unit test
+ private final class ReverterImpl implements Reverter {
+
+ private final Collection<InstanceIdentifier<?>> processedNodes;
+ private final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates;
+ private final Set<InstanceIdentifier<?>> revertDeleteOrder;
+ private final WriteContext ctx;
+
+ ReverterImpl(final Collection<InstanceIdentifier<?>> processedNodes,
+ final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
+ final Set<InstanceIdentifier<?>> writersOrderOriginal,
+ final WriteContext ctx) {
+ this.processedNodes = processedNodes;
+ this.updates = updates;
+ // Use opposite ordering when executing revert
+ this.revertDeleteOrder = writersOrderOriginal == FlatWriterRegistry.this.writersOrder
+ ? FlatWriterRegistry.this.writersOrderReversed
+ : FlatWriterRegistry.this.writersOrder;
+ this.ctx = ctx;
+ }
+
+ @Override
+ public void revert() throws RevertFailedException {
+ Multimap<InstanceIdentifier<?>, DataObjectUpdate> updatesToRevert =
+ filterAndRevertProcessed(updates, processedNodes);
+
+ LOG.info("Attempting revert for changes: {}", updatesToRevert);
+ try {
+ // Perform reversed bulk update without another revert attempt
+ bulkUpdate(updatesToRevert, ctx, false, revertDeleteOrder);
+ LOG.info("Revert successful");
+ } catch (BulkUpdateException e) {
+ LOG.error("Revert failed", e);
+ throw new RevertFailedException(e.getFailedIds(), e);
+ }
+ }
+
+ /**
+ * Create a new updates map that keeps only the already processed changes, switching the before and after
+ * data for each update.
+ */
+ private Multimap<InstanceIdentifier<?>, DataObjectUpdate> filterAndRevertProcessed(
+ final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
+ final Collection<InstanceIdentifier<?>> processedNodes) {
+ final Multimap<InstanceIdentifier<?>, DataObjectUpdate> filtered = HashMultimap.create();
+ for (InstanceIdentifier<?> processedNode : processedNodes) {
+ final InstanceIdentifier<?> wildcardedIid = RWUtils.makeIidWildcarded(processedNode);
+ if (updates.containsKey(wildcardedIid)) {
+ updates.get(wildcardedIid).stream()
+ .filter(dataObjectUpdate -> processedNode.contains(dataObjectUpdate.getId()))
+ .forEach(dataObjectUpdate -> filtered.put(processedNode, dataObjectUpdate.reverse()));
+ }
+ }
+ return filtered;
+ }
+ }
+
+}
diff --git a/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistryBuilder.java b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistryBuilder.java
new file mode 100644
index 000000000..0f75de7e7
--- /dev/null
+++ b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/FlatWriterRegistryBuilder.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package io.fd.honeycomb.translate.util.write.registry;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
+import io.fd.honeycomb.translate.write.registry.WriterRegistryBuilder;
+import io.fd.honeycomb.translate.util.AbstractSubtreeManagerRegistryBuilderBuilder;
+import io.fd.honeycomb.translate.write.Writer;
+import io.fd.honeycomb.translate.write.registry.ModifiableWriterRegistryBuilder;
+import io.fd.honeycomb.translate.write.registry.WriterRegistry;
+import java.util.Set;
+import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Builder for {@link FlatWriterRegistry} allowing users to specify inter-writer relationships.
+ */
+@NotThreadSafe
+public final class FlatWriterRegistryBuilder
+ extends AbstractSubtreeManagerRegistryBuilderBuilder<Writer<? extends DataObject>, WriterRegistry>
+ implements ModifiableWriterRegistryBuilder, WriterRegistryBuilder {
+
+ private static final Logger LOG = LoggerFactory.getLogger(FlatWriterRegistryBuilder.class);
+
+ @Override
+ protected Writer<? extends DataObject> getSubtreeHandler(final @Nonnull Set<InstanceIdentifier<?>> handledChildren,
+ final @Nonnull Writer<? extends DataObject> writer) {
+ return SubtreeWriter.createForWriter(handledChildren, writer);
+ }
+
+ /**
+ * Create FlatWriterRegistry with writers ordered according to submitted relationships.
+ */
+ @Override
+ public WriterRegistry build() {
+ final ImmutableMap<InstanceIdentifier<?>, Writer<?>> mappedWriters = getMappedHandlers();
+ LOG.debug("Building writer registry with writers: {}",
+ mappedWriters.keySet().stream()
+ .map(InstanceIdentifier::getTargetType)
+ .map(Class::getSimpleName)
+ .collect(Collectors.joining(", ")));
+ LOG.trace("Building writer registry with writers: {}", mappedWriters);
+ return new FlatWriterRegistry(mappedWriters);
+ }
+
+ @VisibleForTesting
+ @Override
+ protected ImmutableMap<InstanceIdentifier<?>, Writer<? extends DataObject>> getMappedHandlers() {
+ return super.getMappedHandlers();
+ }
+}
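
A registration sketch for the builder. The add/addAfter methods are assumed from the ModifiableWriterRegistryBuilder contract, and the writer classes plus the BridgeDomains/BridgeDomain bindings are illustrative placeholders.

final FlatWriterRegistryBuilder builder = new FlatWriterRegistryBuilder();
builder.add(new BridgeDomainWriter());
// Interfaces are created/updated after bridge domains; deletes run in the reverse order
builder.addAfter(new InterfaceWriter(),
        InstanceIdentifier.create(BridgeDomains.class).child(BridgeDomain.class));
final WriterRegistry registry = builder.build();
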
diff --git a/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/SubtreeWriter.java b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/SubtreeWriter.java
new file mode 100644
index 000000000..bab1da16f
--- /dev/null
+++ b/infra/translate-utils/src/main/java/io/fd/honeycomb/translate/util/write/registry/SubtreeWriter.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package io.fd.honeycomb.translate.util.write.registry;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import com.google.common.collect.Iterables;
+import io.fd.honeycomb.translate.write.WriteContext;
+import io.fd.honeycomb.translate.write.WriteFailedException;
+import io.fd.honeycomb.translate.write.Writer;
+import java.util.HashSet;
+import java.util.Set;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Simple writer delegate for subtree writers (writers that also handle child nodes), providing a list of all the
+ * child nodes being handled.
+ */
+final class SubtreeWriter<D extends DataObject> implements Writer<D> {
+
+ private final Writer<D> delegate;
+ private final Set<InstanceIdentifier<?>> handledChildTypes = new HashSet<>();
+
+ private SubtreeWriter(final Writer<D> delegate, Set<InstanceIdentifier<?>> handledTypes) {
+ this.delegate = delegate;
+ for (InstanceIdentifier<?> handledType : handledTypes) {
+ // Iid has to start with writer's handled root type
+ checkArgument(delegate.getManagedDataObjectType().getTargetType().equals(
+ handledType.getPathArguments().iterator().next().getType()),
+ "Handled node from subtree has to be identified by an instance identifier starting from: %s."
+ + "Instance identifier was: %s", getManagedDataObjectType().getTargetType(), handledType);
+ checkArgument(Iterables.size(handledType.getPathArguments()) > 1,
+ "Handled node from subtree identifier too short: %s", handledType);
+ handledChildTypes.add(InstanceIdentifier.create(Iterables.concat(
+ getManagedDataObjectType().getPathArguments(), Iterables.skip(handledType.getPathArguments(), 1))));
+ }
+ }
+
+ /**
+ * Return the set of types also handled by this writer. All of them are children of the type managed by this
+ * writer; the writer's own type is not included.
+ */
+ Set<InstanceIdentifier<?>> getHandledChildTypes() {
+ return handledChildTypes;
+ }
+
+ @Override
+ public void update(
+ @Nonnull final InstanceIdentifier<? extends DataObject> id,
+ @Nullable final DataObject dataBefore,
+ @Nullable final DataObject dataAfter, @Nonnull final WriteContext ctx) throws WriteFailedException {
+ delegate.update(id, dataBefore, dataAfter, ctx);
+ }
+
+ @Override
+ @Nonnull
+ public InstanceIdentifier<D> getManagedDataObjectType() {
+ return delegate.getManagedDataObjectType();
+ }
+
+ /**
+ * Wrap a writer as a subtree writer.
+ */
+ static Writer<?> createForWriter(@Nonnull final Set<InstanceIdentifier<?>> handledChildren,
+ @Nonnull final Writer<? extends DataObject> writer) {
+ return new SubtreeWriter<>(writer, handledChildren);
+ }
+}
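
SubtreeWriter is package-private and is normally created for you by FlatWriterRegistryBuilder.getSubtreeHandler(). A registration sketch, assuming the builder exposes a subtreeAdd method and hypothetical Vlan/Acl child bindings under Interface (Sets is Guava's com.google.common.collect.Sets):

final Set<InstanceIdentifier<?>> handledChildren = Sets.newHashSet(
        // every identifier must start at the wrapped writer's root type (Interface here)
        InstanceIdentifier.create(Interface.class).child(Vlan.class),
        InstanceIdentifier.create(Interface.class).child(Acl.class));
builder.subtreeAdd(handledChildren, new InterfaceWriter());
// Updates touching only Vlan or Acl are then routed to InterfaceWriter, with the parent
// Interface data resolved through the WriteContext (see getParentDataObjectUpdate above).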