author     Tibor Frank <tifrank@cisco.com>  2022-05-09 15:57:01 +0200
committer  Tibor Frank <tifrank@cisco.com>  2022-05-09 15:57:01 +0200
commit     36850dfb1d3ff689cbaf683669e2e55c6db84596 (patch)
tree       36366c9cb4c3003d88811f8c10ab3fb2a5323472 /resources/tools/dash/app
parent     d3f16c6ceaa5b475a9c8dc13d21817735e087869 (diff)
feat(uti): data to csv
Change-Id: Iebc24dae2b76408f07c145b2638c346193cf8e1e
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools/dash/app')
-rw-r--r--  resources/tools/dash/app/pal/stats/graphs.py | 19
-rw-r--r--  resources/tools/dash/app/pal/stats/layout.py | 25
2 files changed, 29 insertions, 15 deletions
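
The patch below replaces the old download handler, which dumped the whole self.data frame, with one that reads the date-picker range and the control-panel store via State, filters the frame through select_data(), and sends a transposed, per-job CSV. For orientation, here is a minimal, self-contained sketch of the Dash download mechanism the callback relies on; the layout, component ids and placeholder data are illustrative, not taken from this repository:

    import pandas as pd
    from dash import Dash, dcc, html, Input, Output

    # Placeholder statistics table; the real app loads its data elsewhere.
    df = pd.DataFrame({"job": ["job-a", "job-b"], "duration": [120.0, 95.5]})

    app = Dash(__name__)
    app.layout = html.Div([
        html.Button("Download CSV", id="btn-download-data"),
        dcc.Download(id="download-data"),
    ])

    @app.callback(
        Output("download-data", "data"),
        Input("btn-download-data", "n_clicks"),
        prevent_initial_call=True,
    )
    def _download_data(n_clicks):
        # send_data_frame takes a bound writer (here DataFrame.to_csv) and a
        # target file name; Dash streams the result to the browser.
        return dcc.send_data_frame(df.to_csv, "statistics.csv")

    if __name__ == "__main__":
        app.run_server(debug=True)

The committed callback follows the same shape, adding the State arguments and returning dcc.send_data_frame(data.T.to_csv, f"{job}-stats.csv") for the selected job and date range.
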
diff --git a/resources/tools/dash/app/pal/stats/graphs.py b/resources/tools/dash/app/pal/stats/graphs.py
index 37fc1b2e73..d9f49407d9 100644
--- a/resources/tools/dash/app/pal/stats/graphs.py
+++ b/resources/tools/dash/app/pal/stats/graphs.py
@@ -19,12 +19,16 @@ import pandas as pd
from datetime import datetime, timedelta
-def select_data(data: pd.DataFrame, itm:str) -> pd.DataFrame:
+def select_data(data: pd.DataFrame, itm:str, start: datetime,
+ end: datetime) -> pd.DataFrame:
"""
"""
- df = data.loc[(data["job"] == itm)].sort_values(
- by="start_time", ignore_index=True)
+ df = data.loc[
+ (data["job"] == itm) &
+ (data["start_time"] >= start) & (data["start_time"] <= end)
+ ].sort_values(by="start_time", ignore_index=True)
+ df = df.dropna(subset=["duration", ])
return df
@@ -35,14 +39,7 @@ def graph_statistics(df: pd.DataFrame, job:str, layout: dict,
"""
"""
- data = select_data(df, job)
- data = data.dropna(subset=["duration", ])
- if data.empty:
- return None, None
-
- data = data.loc[(
- (data["start_time"] >= start) & (data["start_time"] <= end)
- )]
+ data = select_data(df, job, start, end)
if data.empty:
return None, None
diff --git a/resources/tools/dash/app/pal/stats/layout.py b/resources/tools/dash/app/pal/stats/layout.py
index 7ac379ae01..1b2aebe332 100644
--- a/resources/tools/dash/app/pal/stats/layout.py
+++ b/resources/tools/dash/app/pal/stats/layout.py
@@ -29,7 +29,7 @@ from datetime import datetime, timedelta
from copy import deepcopy
from ..data.data import Data
-from .graphs import graph_statistics
+from .graphs import graph_statistics, select_data
class Layout:
@@ -661,16 +661,33 @@ class Layout:
@app.callback(
Output("download-data", "data"),
+ State("control-panel", "data"), # Store
+ State("dpr-period", "start_date"),
+ State("dpr-period", "end_date"),
Input("btn-download-data", "n_clicks"),
prevent_initial_call=True
)
- def _download_data(n_clicks):
+ def _download_data(cp_data: dict, start: str, end: str, n_clicks: int):
"""
"""
- if not n_clicks:
+ if not (n_clicks):
raise PreventUpdate
- return dcc.send_data_frame(self.data.to_csv, "statistics.csv")
+ ctrl_panel = self.ControlPanel(cp_data, self.default)
+
+ job = self._get_job(
+ ctrl_panel.get("ri-duts-value"),
+ ctrl_panel.get("ri-ttypes-value"),
+ ctrl_panel.get("ri-cadences-value"),
+ ctrl_panel.get("dd-tbeds-value")
+ )
+
+ start = datetime(int(start[0:4]), int(start[5:7]), int(start[8:10]))
+ end = datetime(int(end[0:4]), int(end[5:7]), int(end[8:10]))
+ data = select_data(self.data, job, start, end)
+ data = data.drop(columns=["job", ])
+
+ return dcc.send_data_frame(data.T.to_csv, f"{job}-stats.csv")
@app.callback(
Output("row-metadata", "children"),
# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Generation template class

import logging, os, sys, cgi, json, jinja2, HTMLParser

# Classes register themselves in this dictionary
"""Mapping of known processors to their classes"""
siphons = {}

"""Mapping of known output formats to their classes"""
formats = {}


"""Generate rendered output for siphoned data."""
class Siphon(object):

    # Set by subclasses
    """Our siphon name"""
    name = None

    # Set by subclasses
    """Name of an identifier used by this siphon"""
    identifier = None

    # Set by subclasses
    """The pyparsing object to use to parse with"""
    _parser = None

    """The input data"""
    _cmds = None

    """Group key to (directory,file) mapping"""
    _group = None

    """Logging handler"""
    log = None

    """Directory to look for siphon rendering templates"""
    template_directory = None

    """Template environment, if we're using templates"""
    _tplenv = None

    def __init__(self, template_directory, format):
        super(Siphon, self).__init__()
        self.log = logging.getLogger("siphon.process.%s" % self.name)

        # Get our output format details
        fmt_klass = formats[format]
        fmt = fmt_klass()
        self._format = fmt

        # Sort out the template search path
        def _tpldir(name):
            return os.sep.join((template_directory, fmt.name, name))

        self.template_directory = template_directory
        searchpath = [
            _tpldir(self.name),
            _tpldir("default"),
        ]
        loader = jinja2.FileSystemLoader(searchpath=searchpath)
        self._tplenv = jinja2.Environment(
            loader=loader,
            trim_blocks=True,
            keep_trailing_newline=True)

        # For convenience, keep references to the escape and unescape
        # helpers in cgi and HTMLParser. These then become available to
        # templates, if needed.
        self._h = HTMLParser.HTMLParser()
        self.escape = cgi.escape
        self.unescape = self._h.unescape


    # Output renderers

    """Returns an object to be used as the sorting key in the item index."""
    def index_sort_key(self, group):
        return group

    """Returns a string to use as the header at the top of the item index."""
    def index_header(self):
        return self.template("index_header")

    """Returns the string fragment to use for each section in the item
    index."""
    def index_section(self, group):
        return self.template("index_section", group=group)

    """Returns the string fragment to use for each entry in the item index."""
    def index_entry(self, meta, item):
        return self.template("index_entry", meta=meta, item=item)

    """Returns an object, typically a string, to be used as the sorting key
    for items within a section."""
    def item_sort_key(self, item):
        return item['name']

    """Returns a key for grouping items together."""
    def group_key(self, directory, file, macro, name):
        _global = self._cmds['_global']

        if file in _global and 'group_label' in _global[file]:
            self._group[file] = (directory, file)
            return file

        self._group[directory] = (directory, None)
        return directory

    """Returns a key for identifying items within a grouping."""
    def item_key(self, directory, file, macro, name):
        return name

    """Returns a string to use as the header when rendering the item."""
    def item_header(self, group):
        return self.template("item_header", group=group)

    """Returns a string to use as the body when rendering the item."""
    def item_format(self, meta, item):
        return self.template("item_format", meta=meta, item=item)

    """Returns a string to use as the label for the page reference."""
    def page_label(self, group):
        return "_".join((
            self.name,
            self.sanitize_label(group)
        ))

    """Returns a title to use for a page."""
    def page_title(self, group):
        _global = self._cmds['_global']
        (directory, file) = self._group[group]

        if file and file in _global and 'group_label' in _global[file]:
            return _global[file]['group_label']

        if directory in _global and 'group_label' in _global[directory]:
            return _global[directory]['group_label']

        return directory

    """Returns a string to use as the label for the section reference."""
    def item_label(self, group, item):
        return "__".join((
            self.name,
            item
        ))

    """Label sanitizer; for creating Doxygen references"""
    def sanitize_label(self, value):
        return value.replace(" ", "_") \
                    .replace("/", "_") \
                    .replace(".", "_")

    """Template processor"""
    def template(self, name, **kwargs):
        tpl = self._tplenv.get_template(name + self._format.extension)
        return tpl.render(
            this=self,
            **kwargs)


    # Processing methods

    """Parse the input file into a more usable dictionary structure."""
    def load_json(self, files):
        self._cmds = {}
        self._group = {}

        line_num = 0
        line_start = 0
        for filename in files:
            filename = os.path.relpath(filename)
            self.log.info("Parsing items in file \"%s\"." % filename)
            data = None
            with open(filename, "r") as fd:
                data = json.load(fd)

            self._cmds['_global'] = data['global']

            # Iterate the loaded items and regroup them
            for item in data["items"]:
                try:
                    o = self._parser.parse(item['block'])
                except:
                    self.log.error("Exception parsing item: %s\n%s" \
                            % (json.dumps(item, separators=(',', ': '),
                                indent=4),
                                item['block']))
                    raise

                # Augment the item with metadata
                o["meta"] = {}
                for key in item:
                    if key == 'block':
                        continue
                    o['meta'][key] = item[key]

                # Load some interesting fields
                directory = item['directory']
                file = item['file']
                macro = o["macro"]
                name = o["name"]

                # Generate keys to group items by
                group_key = self.group_key(directory, file, macro, name)
                item_key = self.item_key(directory, file, macro, name)

                if group_key not in self._cmds:
                    self._cmds[group_key] = {}

                self._cmds[group_key][item_key] = o

    """Iterate over the input data, calling render methods to generate the
    output."""
    def process(self, out=None):

        if out is None:
            out = sys.stdout

        # Accumulated body contents
        contents = ""

        # Write the header for this siphon type
        out.write(self.index_header())

        # Sort key helper for the index
        def group_sort_key(group):
            return self.index_sort_key(group)

        # Iterate the dictionary and process it
        for group in sorted(self._cmds.keys(), key=group_sort_key):
            if group.startswith('_'):
                continue

            self.log.info("Processing items in group \"%s\" (%s)." % \
                (group, group_sort_key(group)))

            # Generate the section index entry (write it now)
            out.write(self.index_section(group))

            # Generate the item header (save for later)
            contents += self.item_header(group)

            def item_sort_key(key):
                return self.item_sort_key(self._cmds[group][key])

            for key in sorted(self._cmds[group].keys(), key=item_sort_key):
                self.log.debug("--- Processing key \"%s\" (%s)." % \
                    (key, item_sort_key(key)))

                o = self._cmds[group][key]
                meta = {
                    "directory": o['meta']['directory'],
                    "file": o['meta']['file'],
                    "macro": o['macro'],
                    "name": o['name'],
                    "key": key,
                    "label": self.item_label(group, key),
                }

                # Generate the index entry for the item (write it now)
                out.write(self.index_entry(meta, o))

                # Generate the item itself (save for later)
                contents += self.item_format(meta, o)

        # Deliver the accumulated body output
        out.write(contents)


"""Output format class"""
class Format(object):

    """Name of this output format"""
    name = None

    """Expected file extension of templates that build this format"""
    extension = None


"""Markdown output format"""
class FormatMarkdown(Format):
    name = "markdown"
    extension = ".md"

# Register 'markdown'
formats["markdown"] = FormatMarkdown


"""Itemlist output format"""
class FormatItemlist(Format):
    name = "itemlist"
    extension = ".itemlist"

# Register 'itemlist'
formats["itemlist"] = FormatItemlist