Install and run tests for FIAT (next) in a Docker container

Build: #109 was successful

Job: Unit tests (py3) was successful

Stages & jobs

  1. Build Stage

  2. Test Stage

Job result summary

Completed
Duration
2 minutes
Agent
local #3
Revision
52840389aa2826fe186559a074859d51918b3bdb
Total tests
681

Tests

  • 681 tests in total
  • 32 tests were skipped
  • 1 minute of run time in total
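
The report only records the outcome, but for context this is roughly how the "Unit tests (py3)" job can drive the suite once FIAT is installed in the container. A minimal sketch in Python, assuming pytest and a test/ directory in the FIAT checkout (both are assumptions; the actual command lives in the plan configuration, not in this log):

    # Hypothetical runner for the "Unit tests (py3)" job.
    # The "test/" path and the -v flag are illustrative assumptions.
    import sys

    import pytest

    # Run the FIAT unit tests and hand the pytest exit code back to the
    # CI agent, so any failure fails the Test Stage.
    sys.exit(pytest.main(["-v", "test/"]))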

Error summary

The build generated some errors. See the full build log for more details.

usermod: no changes
Missing .coveralls.yml file. Using only env variables.
Submitting coverage to coveralls.io...
{"source_files": [{"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, 1, 1, 1, null, 1, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1], "source": "# Copyright (C) 2005 The University of Chicago\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n#\n# Written by Robert C. Kirby\n# Modified by Andrew T. T. McRae (Imperial College London)\n#\n# This work is partially supported by the US Department of Energy\n# under award number DE-FG02-04ER25650\n\nfrom FIAT import dual_set, functional, polynomial_set, finite_element\nimport numpy\n\n\nclass P0Dual(dual_set.DualSet):\n    def __init__(self, ref_el):\n        entity_ids = {}\n        nodes = []\n        vs = numpy.array(ref_el.get_vertices())\n        bary = tuple(numpy.average(vs, 0))\n\n        nodes = [functional.PointEvaluation(ref_el, bary)]\n        entity_ids = {}\n        sd = ref_el.get_spatial_dimension()\n        top = ref_el.get_topology()\n        for dim in sorted(top):\n            entity_ids[dim] = {}\n            for entity in sorted(top[dim]):\n                entity_ids[dim][entity] = []\n\n        entity_ids[sd] = {0: [0]}\n\n        super(P0Dual, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass P0(finite_element.CiarletElement):\n    def __init__(self, ref_el):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, 0)\n        dual = P0Dual(ref_el)\n        degree = 0\n        formdegree = ref_el.get_spatial_dimension()  # n-form\n        super(P0, self).__init__(poly_set, dual, degree, formdegree)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/P0.py"}, {"coverage": [null, null, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, null, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null], "source": "\"\"\"FInite element Automatic Tabulator -- supports constructing and\nevaluating arbitrary order Lagrange and many other elements.\nSimplices in one, two, and three dimensions are supported.\"\"\"\n\nimport pkg_resources\n\n# Import finite element classes\nfrom FIAT.finite_element import FiniteElement, CiarletElement  # noqa: F401\nfrom FIAT.argyris import Argyris\nfrom FIAT.bell import Bell\nfrom FIAT.argyris import QuinticArgyris\nfrom FIAT.brezzi_douglas_marini import BrezziDouglasMarini\nfrom FIAT.brezzi_douglas_fortin_marini import BrezziDouglasFortinMarini\nfrom FIAT.discontinuous_lagrange import DiscontinuousLagrange\nfrom FIAT.discontinuous_taylor import DiscontinuousTaylor\nfrom FIAT.discontinuous_raviart_thomas import DiscontinuousRaviartThomas\nfrom FIAT.hermite import CubicHermite\nfrom FIAT.lagrange import 
Lagrange\nfrom FIAT.gauss_lobatto_legendre import GaussLobattoLegendre\nfrom FIAT.gauss_legendre import GaussLegendre\nfrom FIAT.morley import Morley\nfrom FIAT.nedelec import Nedelec\nfrom FIAT.nedelec_second_kind import NedelecSecondKind\nfrom FIAT.P0 import P0\nfrom FIAT.raviart_thomas import RaviartThomas\nfrom FIAT.crouzeix_raviart import CrouzeixRaviart\nfrom FIAT.regge import Regge\nfrom FIAT.hellan_herrmann_johnson import HellanHerrmannJohnson\nfrom FIAT.bubble import Bubble, FacetBubble\nfrom FIAT.tensor_product import TensorProductElement\nfrom FIAT.enriched import EnrichedElement\nfrom FIAT.nodal_enriched import NodalEnrichedElement\nfrom FIAT.discontinuous import DiscontinuousElement\nfrom FIAT.hdiv_trace import HDivTrace\nfrom FIAT.mixed import MixedElement                       # noqa: F401\nfrom FIAT.restricted import RestrictedElement             # noqa: F401\nfrom FIAT.quadrature_element import QuadratureElement     # noqa: F401\n\n# Important functionality\nfrom FIAT.quadrature import make_quadrature               # noqa: F401\nfrom FIAT.quadrature_schemes import create_quadrature     # noqa: F401\nfrom FIAT.reference_element import ufc_cell, ufc_simplex  # noqa: F401\nfrom FIAT.hdivcurl import Hdiv, Hcurl                     # noqa: F401\n\n__version__ = pkg_resources.get_distribution(\"fenics-fiat\").version\n\n# List of supported elements and mapping to element classes\nsupported_elements = {\"Argyris\": Argyris,\n                      \"Bell\": Bell,\n                      \"Brezzi-Douglas-Marini\": BrezziDouglasMarini,\n                      \"Brezzi-Douglas-Fortin-Marini\": BrezziDouglasFortinMarini,\n                      \"Bubble\": Bubble,\n                      \"FacetBubble\": FacetBubble,\n                      \"Crouzeix-Raviart\": CrouzeixRaviart,\n                      \"Discontinuous Lagrange\": DiscontinuousLagrange,\n                      \"Discontinuous Taylor\": DiscontinuousTaylor,\n                      \"Discontinuous Raviart-Thomas\": DiscontinuousRaviartThomas,\n                      \"Hermite\": CubicHermite,\n                      \"Lagrange\": Lagrange,\n                      \"Gauss-Lobatto-Legendre\": GaussLobattoLegendre,\n                      \"Gauss-Legendre\": GaussLegendre,\n                      \"Morley\": Morley,\n                      \"Nedelec 1st kind H(curl)\": Nedelec,\n                      \"Nedelec 2nd kind H(curl)\": NedelecSecondKind,\n                      \"Raviart-Thomas\": RaviartThomas,\n                      \"Regge\": Regge,\n                      \"EnrichedElement\": EnrichedElement,\n                      \"NodalEnrichedElement\": NodalEnrichedElement,\n                      \"TensorProductElement\": TensorProductElement,\n                      \"BrokenElement\": DiscontinuousElement,\n                      \"HDiv Trace\": HDivTrace,\n                      \"Hellan-Herrmann-Johnson\": HellanHerrmannJohnson}\n\n# List of extra elements\nextra_elements = {\"P0\": P0,\n                  \"Quintic Argyris\": QuinticArgyris}\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/__init__.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, 1, 1, 1, 1, 1, null, 1, 1, 1, null, 1, 0, null, 1, 1, 1, null, null, null, 1, 1, 1, null, null, 1, 1, 1, 1, null, null, 1, 1, 1, null, 1, 1, null, null, 1, 1, null, 1, 1, 1, 1, 1, null, null, 1, 0, 0, 0, 0, 0, null, null, 1, 1, 0, 0, 0, 0, 0, null, 1, null, 1, null, null, 1, 1, 1, 1, 1, 
null, null, null, 1, 1, 1, 1, 0, null, 1, null, null, null, 1, 1, 1, null, null, 1, 1, 1, 1, null, null, 1, 1, 1, null, 1, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 1, null, null, 1, null, null, 1, 1, 1, 1], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional\nfrom FIAT.reference_element import TRIANGLE\n\n\nclass ArgyrisDualSet(dual_set.DualSet):\n    def __init__(self, ref_el, degree):\n        entity_ids = {}\n        nodes = []\n        cur = 0\n\n        top = ref_el.get_topology()\n        verts = ref_el.get_vertices()\n        sd = ref_el.get_spatial_dimension()\n\n        if ref_el.get_shape() != TRIANGLE:\n            raise ValueError(\"Argyris only defined on triangles\")\n\n        pe = functional.PointEvaluation\n        pd = functional.PointDerivative\n        pnd = functional.PointNormalDerivative\n\n        # get jet at each vertex\n\n        entity_ids[0] = {}\n        for v in sorted(top[0]):\n            nodes.append(pe(ref_el, verts[v]))\n\n            # first derivatives\n            for i in range(sd):\n                alpha = [0] * sd\n                alpha[i] = 1\n                nodes.append(pd(ref_el, verts[v], alpha))\n\n            # second derivatives\n            alphas = [[2, 0], [1, 1], [0, 2]]\n            for alpha in alphas:\n                nodes.append(pd(ref_el, verts[v], alpha))\n\n            entity_ids[0][v] = list(range(cur, cur + 6))\n            cur += 6\n\n        # edge dof\n        entity_ids[1] = {}\n        for e in sorted(top[1]):\n            # normal derivatives at degree - 4 points on each edge\n            ndpts = ref_el.make_points(1, e, degree - 3)\n            ndnds = [pnd(ref_el, e, pt) for pt in ndpts]\n            nodes.extend(ndnds)\n            entity_ids[1][e] = list(range(cur, cur + len(ndpts)))\n            cur += len(ndpts)\n\n            # point value at degree-5 points on each edge\n            if degree > 5:\n                ptvalpts = ref_el.make_points(1, e, degree - 4)\n                ptvalnds = [pe(ref_el, pt) for pt in ptvalpts]\n                nodes.extend(ptvalnds)\n                entity_ids[1][e] += list(range(cur, cur + len(ptvalpts)))\n                cur += len(ptvalpts)\n\n        # internal dof\n        entity_ids[2] = {}\n        if degree > 5:\n            internalpts = ref_el.make_points(2, 0, degree - 3)\n            internalnds = [pe(ref_el, pt) for pt in internalpts]\n            nodes.extend(internalnds)\n            entity_ids[2][0] = list(range(cur, cur + len(internalpts)))\n            cur += len(internalpts)\n        else:\n            entity_ids[2] = {0: []}\n\n        super(ArgyrisDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass QuinticArgyrisDualSet(dual_set.DualSet):\n    def 
__init__(self, ref_el):\n        entity_ids = {}\n        nodes = []\n        cur = 0\n\n        # make nodes by getting points\n        # need to do this dimension-by-dimension, facet-by-facet\n        top = ref_el.get_topology()\n        verts = ref_el.get_vertices()\n        sd = ref_el.get_spatial_dimension()\n        if ref_el.get_shape() != TRIANGLE:\n            raise ValueError(\"Argyris only defined on triangles\")\n\n        pd = functional.PointDerivative\n\n        # get jet at each vertex\n\n        entity_ids[0] = {}\n        for v in sorted(top[0]):\n            nodes.append(functional.PointEvaluation(ref_el, verts[v]))\n\n            # first derivatives\n            for i in range(sd):\n                alpha = [0] * sd\n                alpha[i] = 1\n                nodes.append(pd(ref_el, verts[v], alpha))\n\n            # second derivatives\n            alphas = [[2, 0], [1, 1], [0, 2]]\n            for alpha in alphas:\n                nodes.append(pd(ref_el, verts[v], alpha))\n\n            entity_ids[0][v] = list(range(cur, cur + 6))\n            cur += 6\n\n        # edge dof -- normal at each edge midpoint\n        entity_ids[1] = {}\n        for e in sorted(top[1]):\n            pt = ref_el.make_points(1, e, 2)[0]\n            n = functional.PointNormalDerivative(ref_el, e, pt)\n            nodes.append(n)\n            entity_ids[1][e] = [cur]\n            cur += 1\n\n        entity_ids[2] = {0: []}\n\n        super(QuinticArgyrisDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass Argyris(finite_element.CiarletElement):\n    \"\"\"The Argyris finite element.\"\"\"\n\n    def __init__(self, ref_el, degree):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)\n        dual = ArgyrisDualSet(ref_el, degree)\n        super(Argyris, self).__init__(poly_set, dual, degree)\n\n\nclass QuinticArgyris(finite_element.CiarletElement):\n    \"\"\"The Argyris finite element.\"\"\"\n\n    def __init__(self, ref_el):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, 5)\n        dual = QuinticArgyrisDualSet(ref_el)\n        super(QuinticArgyris, self).__init__(poly_set, dual, 5)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/argyris.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, 1, 1, 0, 0, 0, null, null, null, 0, 0, 0, 0, 0, null, 0, null, null, null, 0, 0, 0, null, null, 0, 0, 0, 0, null, null, 0, 0, 0, null, 0, 0, null, null, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, null, 0, null, 0, null, null, 1, null, null, 1, 0, 0, 0], "source": "# Copyright (C) 2018 Robert C. Kirby\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\n# This is not quite Bell, but is 21-dofs and includes 3 extra constraint\n# functionals.  
The first 18 basis functions are the reference element\n# bfs, but the extra three are used in the transformation theory.\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional\nfrom FIAT.reference_element import TRIANGLE, ufc_simplex\n\n\nclass BellDualSet(dual_set.DualSet):\n    def __init__(self, ref_el):\n        entity_ids = {}\n        nodes = []\n        cur = 0\n\n        # make nodes by getting points\n        # need to do this dimension-by-dimension, facet-by-facet\n        top = ref_el.get_topology()\n        verts = ref_el.get_vertices()\n        sd = ref_el.get_spatial_dimension()\n        if ref_el.get_shape() != TRIANGLE:\n            raise ValueError(\"Bell only defined on triangles\")\n\n        pd = functional.PointDerivative\n\n        # get jet at each vertex\n\n        entity_ids[0] = {}\n        for v in sorted(top[0]):\n            nodes.append(functional.PointEvaluation(ref_el, verts[v]))\n\n            # first derivatives\n            for i in range(sd):\n                alpha = [0] * sd\n                alpha[i] = 1\n                nodes.append(pd(ref_el, verts[v], alpha))\n\n            # second derivatives\n            alphas = [[2, 0], [1, 1], [0, 2]]\n            for alpha in alphas:\n                nodes.append(pd(ref_el, verts[v], alpha))\n\n            entity_ids[0][v] = list(range(cur, cur + 6))\n            cur += 6\n\n        # we need an edge quadrature rule for the moment\n        from FIAT.quadrature_schemes import create_quadrature\n        from FIAT.jacobi import eval_jacobi\n        rline = ufc_simplex(1)\n        q1d = create_quadrature(rline, 8)\n        q1dpts = q1d.get_points()\n        leg4_at_qpts = eval_jacobi(0, 0, 4, 2.0*q1dpts - 1)\n\n        imond = functional.IntegralMomentOfNormalDerivative\n        entity_ids[1] = {}\n        for e in sorted(top[1]):\n            entity_ids[1][e] = [18+e]\n            nodes.append(imond(ref_el, e, q1d, leg4_at_qpts))\n\n        entity_ids[2] = {0: []}\n\n        super(BellDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass Bell(finite_element.CiarletElement):\n    \"\"\"The Bell finite element.\"\"\"\n\n    def __init__(self, ref_el):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, 5)\n        dual = BellDualSet(ref_el)\n        super(Bell, self).__init__(poly_set, dual, 5)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/bell.py"}, {"coverage": [1, null, null, 1, null, null, 1, 1, null, null, null, 1, 1, null, 1, 1, null, null, null, null, 1, 1, 1, 1, 1, 1, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, null, 1, null, null, 1, 1, null, 1, 1, 1, 1, null, null, 1, 1, null, 1, null, null, 1, 1, 1, 0, null, null, null, null, null, 1, null, 1, null, null, 1, null, 1, 1, null, null, null, 1, 1, null, null, 1, 1, null, 1, null, 1, null, null, null, null, null, null, 1, 1, null, null, 1, null, null, 1, null, 1, 0, null, 1, 1, 1, 1, null], "source": "from FIAT import (finite_element, functional, dual_set,\n                  polynomial_set, lagrange)\n\nimport numpy\n\n\nclass BDFMDualSet(dual_set.DualSet):\n    def __init__(self, ref_el, degree):\n\n        # Initialize containers for map: mesh_entity -> dof number and\n        # dual basis\n        entity_ids = {}\n        nodes = []\n\n        sd = ref_el.get_spatial_dimension()\n        t = ref_el.get_topology()\n\n        # Define each functional for the dual set\n        # codimension 1 facet normals.\n        # note this will die for degree greater than 1.\n        for i 
in range(len(t[sd - 1])):\n            pts_cur = ref_el.make_points(sd - 1, i, sd + degree)\n            for j in range(len(pts_cur)):\n                pt_cur = pts_cur[j]\n                f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)\n                nodes.append(f)\n\n        # codimension 1 facet tangents.\n        # because the tangent component is discontinuous, these actually\n        # count as internal nodes.\n        tangent_count = 0\n        for i in range(len(t[sd - 1])):\n            pts_cur = ref_el.make_points(sd - 1, i, sd + degree - 1)\n            tangent_count += len(pts_cur)\n            for j in range(len(pts_cur)):\n                pt_cur = pts_cur[j]\n                f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur)\n                nodes.append(f)\n\n        # sets vertices (and in 3d, edges) to have no nodes\n        for i in range(sd - 1):\n            entity_ids[i] = {}\n            for j in range(len(t[i])):\n                entity_ids[i][j] = []\n\n        cur = 0\n\n        # set codimension 1 (edges 2d, faces 3d) dof\n        pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree)\n        pts_per_facet = len(pts_facet_0)\n\n        entity_ids[sd - 1] = {}\n        for i in range(len(t[sd - 1])):\n            entity_ids[sd - 1][i] = list(range(cur, cur + pts_per_facet))\n            cur += pts_per_facet\n\n        # internal nodes\n        entity_ids[sd] = {0: list(range(cur, cur + tangent_count))}\n        cur += tangent_count\n\n        super(BDFMDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\ndef BDFMSpace(ref_el, order):\n    sd = ref_el.get_spatial_dimension()\n    if sd != 2:\n        raise Exception(\"BDFM_k elements only valid for dim 2\")\n    # Note that order will be 2.\n\n    # Linear vector valued space. 
Since the embedding degree of this element\n    # is 2, this is implemented by taking the quadratic space and selecting\n    # the linear polynomials.\n    vec_poly_set = polynomial_set.ONPolynomialSet(ref_el, order, (sd,))\n    # Linears are the first three polynomials in each dimension.\n    vec_poly_set = vec_poly_set.take([0, 1, 2, 6, 7, 8])\n\n    # Scalar quadratic Lagrange element.\n    lagrange_ele = lagrange.Lagrange(ref_el, order)\n    # Select the dofs associated with the edges.\n    edge_dofs_dict = lagrange_ele.dual.get_entity_ids()[sd - 1]\n    edge_dofs = numpy.array([(edge, dof)\n                             for edge, dofs in list(edge_dofs_dict.items())\n                             for dof in dofs])\n\n    tangent_polys = lagrange_ele.poly_set.take(edge_dofs[:, 1])\n    new_coeffs = numpy.zeros((tangent_polys.get_num_members(), sd, tangent_polys.coeffs.shape[-1]))\n\n    # Outer product of the tangent vectors with the quadratic edge polynomials.\n    for i, (edge, dof) in enumerate(edge_dofs):\n        tangent = ref_el.compute_edge_tangent(edge)\n\n        new_coeffs[i, :, :] = numpy.outer(tangent, tangent_polys.coeffs[i, :])\n\n    bubble_set = polynomial_set.PolynomialSet(ref_el,\n                                              order,\n                                              order,\n                                              vec_poly_set.get_expansion_set(),\n                                              new_coeffs,\n                                              vec_poly_set.get_dmats())\n\n    element_set = polynomial_set.polynomial_set_union_normalized(bubble_set, vec_poly_set)\n    return element_set\n\n\nclass BrezziDouglasFortinMarini(finite_element.CiarletElement):\n    \"\"\"The BDFM element\"\"\"\n\n    def __init__(self, ref_el, degree):\n\n        if degree != 2:\n            raise Exception(\"BDFM_k elements only valid for k == 2\")\n\n        poly_set = BDFMSpace(ref_el, degree)\n        dual = BDFMDualSet(ref_el, degree - 1)\n        formdegree = ref_el.get_spatial_dimension() - 1\n        super(BrezziDouglasFortinMarini, self).__init__(poly_set, dual, degree, formdegree,\n                                                        mapping=\"contravariant piola\")\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/brezzi_douglas_fortin_marini.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, 1, 1, null, null, null, 1, 1, null, 1, 1, null, null, null, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, null, null, 1, 1, 1, 1, null, 1, null, null, 1, 1, null, 1, 1, 1, 1, null, null, 1, null, 1, 1, 1, null, 1, null, null, 1, null, null, 1, null, 1, 0, null, 1, 1, 1, 1, 1, null], "source": "# Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University)\n# Modified by Andrew T. T. McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import (finite_element, quadrature, functional, dual_set,\n                  polynomial_set, nedelec)\n\n\nclass BDMDualSet(dual_set.DualSet):\n    def __init__(self, ref_el, degree):\n\n        # Initialize containers for map: mesh_entity -> dof number and\n        # dual basis\n        entity_ids = {}\n        nodes = []\n\n        sd = ref_el.get_spatial_dimension()\n        t = ref_el.get_topology()\n\n        # Define each functional for the dual set\n        # codimension 1 facets\n        for i in range(len(t[sd - 1])):\n            pts_cur = ref_el.make_points(sd - 1, i, sd + degree)\n            for j in range(len(pts_cur)):\n                pt_cur = pts_cur[j]\n                f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)\n                nodes.append(f)\n\n        # internal nodes\n        if degree > 1:\n            Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))\n            qpts = Q.get_points()\n            Nedel = nedelec.Nedelec(ref_el, degree - 1)\n            Nedfs = Nedel.get_nodal_basis()\n            zero_index = tuple([0 for i in range(sd)])\n            Ned_at_qpts = Nedfs.tabulate(qpts)[zero_index]\n\n            for i in range(len(Ned_at_qpts)):\n                phi_cur = Ned_at_qpts[i, :]\n                l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur)\n                nodes.append(l_cur)\n\n        # sets vertices (and in 3d, edges) to have no nodes\n        for i in range(sd - 1):\n            entity_ids[i] = {}\n            for j in range(len(t[i])):\n                entity_ids[i][j] = []\n\n        cur = 0\n\n        # set codimension 1 (edges 2d, faces 3d) dof\n        pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree)\n        pts_per_facet = len(pts_facet_0)\n\n        entity_ids[sd - 1] = {}\n        for i in range(len(t[sd - 1])):\n            entity_ids[sd - 1][i] = list(range(cur, cur + pts_per_facet))\n            cur += pts_per_facet\n\n        # internal nodes, if applicable\n        entity_ids[sd] = {0: []}\n\n        if degree > 1:\n            num_internal_nodes = len(Ned_at_qpts)\n            entity_ids[sd][0] = list(range(cur, cur + num_internal_nodes))\n\n        super(BDMDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass BrezziDouglasMarini(finite_element.CiarletElement):\n    \"\"\"The BDM element\"\"\"\n\n    def __init__(self, ref_el, degree):\n\n        if degree < 1:\n            raise Exception(\"BDM_k elements only valid for k >= 1\")\n\n        sd = ref_el.get_spatial_dimension()\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, degree, (sd, ))\n        dual = BDMDualSet(ref_el, degree)\n        formdegree = sd - 1  # (n-1)-form\n        super(BrezziDouglasMarini, self).__init__(poly_set, dual, degree, formdegree,\n                                                  mapping=\"contravariant piola\")\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/brezzi_douglas_marini.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, null, null, 1, null, null, 1, 1, null, 1, 1, 1, 1, 1, null, 1, null, null, 1, null, null, 1, 1, null, null, 1, null, null, 1, 0], "source": "# Copyright (C) 2013 Andrew T. T. 
McRae (Imperial College London)\n# Copyright (C) 2015 Jan Blechta\n# Copyright (C) 2018 Patrick E. Farrell\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT.lagrange import Lagrange\nfrom FIAT.restricted import RestrictedElement\nfrom itertools import chain\n\n\nclass CodimBubble(RestrictedElement):\n    \"\"\"Bubbles of a certain codimension.\"\"\"\n\n    def __init__(self, ref_el, degree, codim):\n        element = Lagrange(ref_el, degree)\n\n        cell_dim = ref_el.get_dimension()\n        assert cell_dim == max(element.entity_dofs().keys())\n        dofs = list(sorted(chain(*element.entity_dofs()[cell_dim - codim].values())))\n        if len(dofs) == 0:\n            raise RuntimeError('Bubble element of degree %d and codimension %d has no dofs' % (degree, codim))\n\n        super(CodimBubble, self).__init__(element, indices=dofs)\n\n\nclass Bubble(CodimBubble):\n    \"\"\"The bubble finite element: the dofs of the Lagrange FE in the interior of the cell\"\"\"\n\n    def __init__(self, ref_el, degree):\n        super(Bubble, self).__init__(ref_el, degree, codim=0)\n\n\nclass FacetBubble(CodimBubble):\n    \"\"\"The facet bubble finite element: the dofs of the Lagrange FE in the interior of the facets\"\"\"\n\n    def __init__(self, ref_el, degree):\n        super(FacetBubble, self).__init__(ref_el, degree, codim=1)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/bubble.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, null, 1, null, null, 1, 1, null, null, 1, 1, null, null, 1, null, null, 1, null, null, 1, 1, null, null, 1, null, null, 1, null, null, null, null, null, null, null, 1, null, null, 1, 0, null, null, null, 1, 1, 1], "source": "# Copyright (C) 2010 Marie E. Rognes\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n#\n# Written by Marie E. Rognes <meg@simula.no> based on original\n# implementation by Robert C. 
Kirby.\n#\n# Last changed: 2010-01-28\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional\n\n\ndef _initialize_entity_ids(topology):\n    entity_ids = {}\n    for (i, entity) in list(topology.items()):\n        entity_ids[i] = {}\n        for j in entity:\n            entity_ids[i][j] = []\n    return entity_ids\n\n\nclass CrouzeixRaviartDualSet(dual_set.DualSet):\n    \"\"\"Dual basis for Crouzeix-Raviart element (linears continuous at\n    boundary midpoints).\"\"\"\n\n    def __init__(self, cell, degree):\n\n        # Get topology dictionary\n        d = cell.get_spatial_dimension()\n        topology = cell.get_topology()\n\n        # Initialize empty nodes and entity_ids\n        entity_ids = _initialize_entity_ids(topology)\n        nodes = [None for i in list(topology[d - 1].keys())]\n\n        # Construct nodes and entity_ids\n        for i in topology[d - 1]:\n\n            # Construct midpoint\n            x = cell.make_points(d - 1, i, d)[0]\n\n            # Degree of freedom number i is evaluation at midpoint\n            nodes[i] = functional.PointEvaluation(cell, x)\n            entity_ids[d - 1][i] += [i]\n\n        # Initialize super-class\n        super(CrouzeixRaviartDualSet, self).__init__(nodes, cell, entity_ids)\n\n\nclass CrouzeixRaviart(finite_element.CiarletElement):\n    \"\"\"The Crouzeix-Raviart finite element:\n\n    K:                 Triangle/Tetrahedron\n    Polynomial space:  P_1\n    Dual basis:        Evaluation at facet midpoints\n    \"\"\"\n\n    def __init__(self, cell, degree):\n\n        # Crouzeix Raviart is only defined for polynomial degree == 1\n        if not (degree == 1):\n            raise Exception(\"Crouzeix-Raviart only defined for degree 1\")\n\n        # Construct polynomial spaces, dual basis and initialize\n        # FiniteElement\n        space = polynomial_set.ONPolynomialSet(cell, 1)\n        dual = CrouzeixRaviartDualSet(cell, 1)\n        super(CrouzeixRaviart, self).__init__(space, dual, 1)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/crouzeix_raviart.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, null, 1, null, 1, null, null, 1, null, 1, null, 1, null, 1, null, 1, null, 1, null, null, 0, null, 1, null, 0, null, 1, null, null, 0, null, 1, null, null, null, 0, null, 1, null, 0, null, 1, null, 0, null, 1, null, null, 1, null, 1, null, 1, null, 1, null, null, 0, null, 1, null, 0], "source": "# Copyright (C) 2014 Andrew T. T. McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT.finite_element import CiarletElement\nfrom FIAT.dual_set import DualSet\n\n\nclass DiscontinuousElement(CiarletElement):\n    \"\"\"A copy of an existing element where all dofs are associated with the cell\"\"\"\n\n    def __init__(self, element):\n        self._element = element\n        new_entity_ids = {}\n        topology = element.get_reference_element().get_topology()\n        for dim in sorted(topology):\n            new_entity_ids[dim] = {}\n            for ent in sorted(topology[dim]):\n                new_entity_ids[dim][ent] = []\n\n        new_entity_ids[dim][0] = list(range(element.space_dimension()))\n        # re-initialise the dual, so entity_closure_dofs is recalculated\n        self.dual = DualSet(element.dual_basis(), element.get_reference_element(), new_entity_ids)\n\n        # fully discontinuous\n        self.formdegree = element.get_reference_element().get_spatial_dimension()\n\n    def degree(self):\n        \"Return the degree of the (embedding) polynomial space.\"\n        return self._element.degree()\n\n    def get_reference_element(self):\n        \"Return the reference element for the finite element.\"\n        return self._element.get_reference_element()\n\n    def get_nodal_basis(self):\n        \"\"\"Return the nodal basis, encoded as a PolynomialSet object,\n        for the finite element.\"\"\"\n        return self._element.get_nodal_basis()\n\n    def get_order(self):\n        \"Return the order of the element (may be different from the degree)\"\n        return self._element.get_order()\n\n    def get_coeffs(self):\n        \"\"\"Return the expansion coefficients for the basis of the\n        finite element.\"\"\"\n        return self._element.get_coeffs()\n\n    def mapping(self):\n        \"\"\"Return a list of appropriate mappings from the reference\n        element to a physical element for each basis function of the\n        finite element.\"\"\"\n        return self._element.mapping()\n\n    def num_sub_elements(self):\n        \"Return the number of sub-elements.\"\n        return self._element.num_sub_elements()\n\n    def space_dimension(self):\n        \"Return the dimension of the finite element space.\"\n        return self._element.space_dimension()\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Return tabulated values of derivatives up to given order of\n        basis functions at given points.\"\"\"\n        return self._element.tabulate(order, points, entity)\n\n    def value_shape(self):\n        \"Return the value shape of the finite element functions.\"\n        return self._element.value_shape()\n\n    def dmats(self):\n        \"\"\"Return dmats: expansion coefficients for basis function\n        derivatives.\"\"\"\n        return self._element.dmats()\n\n    def get_num_members(self, arg):\n        \"Return number of members of the expansion set.\"\n        return self._element.get_num_members()\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, null, null, null, null, null, 1, 1, 1, null, null, null, 1, null, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, null, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 1, 1, null, null, 1, 1, 1, null, 1], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n# Modified by Andrew T. T. 
McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional, P0\n\n\nclass DiscontinuousLagrangeDualSet(dual_set.DualSet):\n    \"\"\"The dual basis for Lagrange elements.  This class works for\n    simplices of any dimension.  Nodes are point evaluation at\n    equispaced points.  This is the discontinuous version where\n    all nodes are topologically associated with the cell itself\"\"\"\n\n    def __init__(self, ref_el, degree):\n        entity_ids = {}\n        nodes = []\n\n        # make nodes by getting points\n        # need to do this dimension-by-dimension, facet-by-facet\n        top = ref_el.get_topology()\n\n        cur = 0\n        for dim in sorted(top):\n            entity_ids[dim] = {}\n            for entity in sorted(top[dim]):\n                pts_cur = ref_el.make_points(dim, entity, degree)\n                nodes_cur = [functional.PointEvaluation(ref_el, x)\n                             for x in pts_cur]\n                nnodes_cur = len(nodes_cur)\n                nodes += nodes_cur\n                entity_ids[dim][entity] = []\n                cur += nnodes_cur\n\n        entity_ids[dim][0] = list(range(len(nodes)))\n\n        super(DiscontinuousLagrangeDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass HigherOrderDiscontinuousLagrange(finite_element.CiarletElement):\n    \"\"\"The discontinuous Lagrange finite element.  It is what it is.\"\"\"\n\n    def __init__(self, ref_el, degree):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)\n        dual = DiscontinuousLagrangeDualSet(ref_el, degree)\n        formdegree = ref_el.get_spatial_dimension()  # n-form\n        super(HigherOrderDiscontinuousLagrange, self).__init__(poly_set, dual, degree, formdegree)\n\n\ndef DiscontinuousLagrange(ref_el, degree):\n    if degree == 0:\n        return P0.P0(ref_el)\n    else:\n        return HigherOrderDiscontinuousLagrange(ref_el, degree)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous_lagrange.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, 1, null, null, null, null, null, 1, 1, 1, null, 1, 1, null, null, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, null, null, 1, 1, 1, null, null, 1, null, 1, null, null, 1, null, null, 1, null, 1, 1, 1, 1, null], "source": "# Copyright (C) 2008-2012 Robert C. 
Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n#\n# Modified by Jan Blechta 2014\n\nfrom FIAT import dual_set, finite_element, functional\nfrom FIAT.raviart_thomas import RTSpace\n\n\nclass DRTDualSet(dual_set.DualSet):\n    \"\"\"Dual basis for Raviart-Thomas elements consisting of point\n    evaluation of normals on facets of codimension 1 and internal\n    moments against polynomials. This is the discontinuous version\n    where all nodes are topologically associated with the cell itself\"\"\"\n\n    def __init__(self, ref_el, degree):\n        entity_ids = {}\n        nodes = []\n\n        sd = ref_el.get_spatial_dimension()\n        t = ref_el.get_topology()\n\n        # codimension 1 facets\n        for i in range(len(t[sd - 1])):\n            pts_cur = ref_el.make_points(sd - 1, i, sd + degree)\n            for j in range(len(pts_cur)):\n                pt_cur = pts_cur[j]\n                f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)\n                nodes.append(f)\n\n        # internal nodes.  Let's just use points at a lattice\n        if degree > 0:\n            cpe = functional.ComponentPointEvaluation\n            pts = ref_el.make_points(sd, 0, degree + sd)\n            for d in range(sd):\n                for i in range(len(pts)):\n                    l_cur = cpe(ref_el, d, (sd,), pts[i])\n                    nodes.append(l_cur)\n\n        # sets vertices (and in 3d, edges) to have no nodes\n        for i in range(sd - 1):\n            entity_ids[i] = {}\n            for j in range(len(t[i])):\n                entity_ids[i][j] = []\n\n        # set codimension 1 (edges 2d, faces 3d) to have no dofs\n        entity_ids[sd - 1] = {}\n        for i in range(len(t[sd - 1])):\n            entity_ids[sd - 1][i] = []\n\n        # cell dofs\n        entity_ids[sd] = {0: list(range(len(nodes)))}\n\n        super(DRTDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass DiscontinuousRaviartThomas(finite_element.CiarletElement):\n    \"\"\"The discontinuous Raviart-Thomas finite element\"\"\"\n\n    def __init__(self, ref_el, q):\n\n        degree = q - 1\n        poly_set = RTSpace(ref_el, degree)\n        dual = DRTDualSet(ref_el, degree)\n        super(DiscontinuousRaviartThomas, self).__init__(poly_set, dual, degree,\n                                                         mapping=\"contravariant piola\")\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous_raviart_thomas.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, null, null, 1, null, null, null, null, 1, 1, 1, null, 1, null, 1, 1, null, 1, 1, 1, null, 1, 1, null, 1, null, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 1, 1, null, null, 1, 1, 1, null, 1], "source": "# Copyright (C) 2008 Robert C. 
Kirby (Texas Tech University)\n# Modified by Colin Cotter (Imperial College London)\n#             David Ham (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional, P0, quadrature\nfrom FIAT.polynomial_set import mis\nimport numpy\n\n\nclass DiscontinuousTaylorDualSet(dual_set.DualSet):\n    \"\"\"The dual basis for Taylor elements.  This class works for\n    intervals.  Nodes are function and derivative evaluation\n    at the midpoint.\"\"\"\n\n    def __init__(self, ref_el, degree):\n        nodes = []\n        dim = ref_el.get_spatial_dimension()\n\n        Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))\n\n        f_at_qpts = numpy.ones(len(Q.wts))\n        nodes.append(functional.IntegralMoment(ref_el, Q, f_at_qpts))\n\n        vertices = ref_el.get_vertices()\n        midpoint = tuple(sum(numpy.array(vertices)) / len(vertices))\n        for k in range(1, degree + 1):\n            # Loop over all multi-indices of degree k.\n            for alpha in mis(dim, k):\n                nodes.append(functional.PointDerivative(ref_el, midpoint, alpha))\n\n        entity_ids = {d: {e: [] for e in ref_el.sub_entities[d]}\n                      for d in range(dim + 1)}\n        entity_ids[dim][0] = list(range(len(nodes)))\n\n        super(DiscontinuousTaylorDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass HigherOrderDiscontinuousTaylor(finite_element.CiarletElement):\n    \"\"\"The discontinuous Taylor finite element. Use a Taylor basis for DG.\"\"\"\n\n    def __init__(self, ref_el, degree):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)\n        dual = DiscontinuousTaylorDualSet(ref_el, degree)\n        formdegree = ref_el.get_spatial_dimension()  # n-form\n        super(HigherOrderDiscontinuousTaylor, self).__init__(poly_set, dual, degree, formdegree)\n\n\ndef DiscontinuousTaylor(ref_el, degree):\n    if degree == 0:\n        return P0.P0(ref_el)\n    else:\n        return HigherOrderDiscontinuousTaylor(ref_el, degree)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous_taylor.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, 1, 1, 1, 1, null, null, 1, 1, 1, null, 1, 1, null, 1, 1, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, null, 1, 1, 1, 1, null, 1, null, 1, null, 1, 1, null, 1], "source": "# Copyright (C) 2008-2012 Robert C. 
Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nimport numpy\n\n\nclass DualSet(object):\n    def __init__(self, nodes, ref_el, entity_ids):\n        self.nodes = nodes\n        self.ref_el = ref_el\n        self.entity_ids = entity_ids\n\n        # Compute the nodes on the closure of each sub_entity.\n        self.entity_closure_ids = {}\n        for dim, entities in ref_el.sub_entities.items():\n            self.entity_closure_ids[dim] = {}\n\n            for e, sub_entities in entities.items():\n                ids = []\n\n                for d, se in sub_entities:\n                    ids += self.entity_ids[d][se]\n                ids.sort()\n                self.entity_closure_ids[d][e] = ids\n\n    def get_nodes(self):\n        return self.nodes\n\n    def get_entity_closure_ids(self):\n        return self.entity_closure_ids\n\n    def get_entity_ids(self):\n        return self.entity_ids\n\n    def get_reference_element(self):\n        return self.ref_el\n\n    def to_riesz(self, poly_set):\n\n        tshape = self.nodes[0].target_shape\n        num_nodes = len(self.nodes)\n        es = poly_set.get_expansion_set()\n        num_exp = es.get_num_members(poly_set.get_embedded_degree())\n\n        riesz_shape = tuple([num_nodes] + list(tshape) + [num_exp])\n\n        self.mat = numpy.zeros(riesz_shape, \"d\")\n\n        for i in range(len(self.nodes)):\n            self.mat[i][:] = self.nodes[i].to_riesz(poly_set)\n\n        return self.mat\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/dual_set.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, null, 1, 1, 1, null, null, 1, null, null, 1, null, null, null, null, null, null, null, 1, null, null, null, null, 1, 0, 1, 0, 1, 0, null, null, null, 1, null, null, null, 1, 0, null, 1, null, null, 1, 1, null, null, null, 1, null, null, 1, 1, null, 1, null, null, 1, null, null, 1, null, 1, null, 0, null, 1, null, 1, null, 1, null, null, 1, null, 1, null, null, 0, null, 1, null, null, null, 1, 1, null, 1, 1, 1, null, 1, 1, null, null, 1, null, 1, 1, 0, null, null, 1, null, null, 1, null, 1, null, 1, null, 1, 1, null, 1, null, null, 0, null, 1, null, 0], "source": "# Copyright (C) 2013 Andrew T. T. McRae, 2015-2016 Jan Blechta, and others\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom itertools import chain\n\nimport numpy\n\nfrom FIAT.finite_element import FiniteElement\nfrom FIAT.dual_set import DualSet\nfrom FIAT.mixed import concatenate_entity_dofs\n\n\n__all__ = ['EnrichedElement']\n\n\nclass EnrichedElement(FiniteElement):\n    \"\"\"Class implementing a finite element that combined the degrees of freedom\n    of two existing finite elements.\n\n    This is an implementation which does not care about orthogonality of\n    primal and dual basis.\n    \"\"\"\n\n    def __init__(self, *elements):\n        # Firstly, check it makes sense to enrich.  Elements must have:\n        # - same reference element\n        # - same mapping\n        # - same value shape\n        if len(set(e.get_reference_element() for e in elements)) > 1:\n            raise ValueError(\"Elements must be defined on the same reference element\")\n        if len(set(m for e in elements for m in e.mapping())) > 1:\n            raise ValueError(\"Elements must have same mapping\")\n        if len(set(e.value_shape() for e in elements)) > 1:\n            raise ValueError(\"Elements must have the same value shape\")\n\n        # order is at least max, possibly more, though getting this\n        # right isn't important AFAIK\n        order = max(e.get_order() for e in elements)\n        # form degree is essentially max (not true for Hdiv/Hcurl,\n        # but this will raise an error above anyway).\n        # E.g. an H^1 function enriched with an L^2 is now just L^2.\n        if any(e.get_formdegree() is None for e in elements):\n            formdegree = None\n        else:\n            formdegree = max(e.get_formdegree() for e in elements)\n\n        # set up reference element and mapping, following checks above\n        ref_el, = set(e.get_reference_element() for e in elements)\n        mapping, = set(m for e in elements for m in e.mapping())\n\n        # set up entity_ids - for each geometric entity, just concatenate\n        # the entities of the constituent elements\n        entity_ids = concatenate_entity_dofs(ref_el, elements)\n\n        # set up dual basis - just concatenation\n        nodes = list(chain.from_iterable(e.dual_basis() for e in elements))\n        dual = DualSet(nodes, ref_el, entity_ids)\n\n        super(EnrichedElement, self).__init__(ref_el, dual, order, formdegree, mapping)\n\n        # required degree (for quadrature) is definitely max\n        self.polydegree = max(e.degree() for e in elements)\n\n        # Store subelements\n        self._elements = elements\n\n    def elements(self):\n        \"Return reference to original subelements\"\n        return self._elements\n\n    def degree(self):\n        \"\"\"Return the degree of the (embedding) polynomial space.\"\"\"\n        return self.polydegree\n\n    def get_nodal_basis(self):\n        \"\"\"Return the nodal basis, encoded as a PolynomialSet object,\n        for the finite element.\"\"\"\n        raise NotImplementedError(\"get_nodal_basis not implemented\")\n\n    def get_coeffs(self):\n        \"\"\"Return the expansion coefficients for the basis of the\n        finite element.\"\"\"\n        raise NotImplementedError(\"get_coeffs not implemented\")\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Return tabulated values of derivatives up to given order of\n        basis functions at 
given points.\"\"\"\n\n        num_components = numpy.prod(self.value_shape())\n        table_shape = (self.space_dimension(), num_components, len(points))\n\n        table = {}\n        irange = slice(0)\n        for element in self._elements:\n\n            etable = element.tabulate(order, points, entity)\n            irange = slice(irange.stop, irange.stop + element.space_dimension())\n\n            # Insert element table into table\n            for dtuple in etable.keys():\n\n                if dtuple not in table:\n                    if num_components == 1:\n                        table[dtuple] = numpy.zeros((self.space_dimension(), len(points)),\n                                                    dtype=etable[dtuple].dtype)\n                    else:\n                        table[dtuple] = numpy.zeros(table_shape,\n                                                    dtype=etable[dtuple].dtype)\n\n                table[dtuple][irange][:] = etable[dtuple]\n\n        return table\n\n    def value_shape(self):\n        \"\"\"Return the value shape of the finite element functions.\"\"\"\n        result, = set(e.value_shape() for e in self._elements)\n        return result\n\n    def dmats(self):\n        \"\"\"Return dmats: expansion coefficients for basis function\n        derivatives.\"\"\"\n        raise NotImplementedError(\"dmats not implemented\")\n\n    def get_num_members(self, arg):\n        \"\"\"Return number of members of the expansion set.\"\"\"\n        raise NotImplementedError(\"get_num_members not implemented\")\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/enriched.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, null, null, 1, 1, null, 1, null, null, null, null, null, null, null, 1, 1, 1, 1, null, null, null, 1, 1, 1, null, 1, null, null, null, null, null, null, 1, 1, 1, null, null, null, null, 1, 1, 1, null, 1, null, null, 1, 1, 1, 1, 1, null, null, 1, null, null, 1, null, 1, 1, 1, 1, 1, null, 1, 1, null, null, null, null, 1, 1, null, null, 1, null, 1, 1, 1, 1, null, null, 1, null, 1, 1, 1, 1, 1, null, null, 1, null, null, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, null, 1, 1, null, 1, null, 1, 1, 1, null, 1, 1, 1, null, 1, null, 0, null, 1, null, null, null, null, 1, 1, null, null, 1, null, 1, 1, 1, null, 1, 1, null, null, 1, 1, 1, 1, 1, null, 1, null, null, 1, null, null, null, 1, 1, 0, 1, 1, 1, 1, 1, 1, null, null, 1, 1, null, 1, 1, 0, null, 1, null, 1, null, null, 1, 1, null, null, 1, 1, null, 1, null, 1, null, null, null, 1, 1, null, 1, 1, null, 1, 1, 1, null, 1, null, 1, 1, null, 1, null, null, 1, 1, null, null, 1, 1, 1, 1, null, null, null, 1, 1, 1, null, 1, null, null, 1, 1, 1, null, null, null, 1, 1, 1, null, null, 1, null, 1, 0, null, null, 1, null, null, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, null, 1, 1, null, 1, 1, 0, null, 1, null, 1, null, null, 1, 1, null, null, 1, 1, null, 1, 1, null, null, null, null, 1, 1, null, 1, 1, 1, null, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, null, null, null, 1, 1, null, null, 1, 1, 1, 1, 1, 1, null, null, null, 1, 1, 1, null, null, null, 1, 1, 1, 1, 1, null, null, null, 1, 1, 1, 1, null, null, 1, null, 1, 1, 1, 1, null, null, null, 1, 1, 1, null, null, 1, null, 1, 0, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, null, 0, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, null, 0], "source": "# Copyright (C) 2008 Robert C. 
FIAT/expansions.py
    Principal orthogonal expansion functions (Karniadakis and Sherwin), parametrized over
    a reference element: jrc() gives the recurrence coefficients, _tabulate_dpts()
    tabulates derivatives symbolically via sympy, and xi_triangle()/xi_tetrahedron() are
    the collapsed-coordinate maps. LineExpansionSet, TriangleExpansionSet and
    TetrahedronExpansionSet each provide get_num_members(), tabulate() and
    tabulate_derivatives() (plus tabulate_jet() on the triangle and tetrahedron), using an
    affine map from the given cell to the corresponding default cell. get_expansion_set()
    dispatches on the reference-cell shape, and polynomial_dimension(ref_el, degree)
    returns degree+1, (degree+1)(degree+2)/2 or (degree+1)(degree+2)(degree+3)/6 for
    lines, triangles and tetrahedra.
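A minimal usage sketch for the expansion sets summarized above, assuming the FIAT build
installed in this container is importable; the evaluation points are arbitrary.

    import numpy
    from FIAT import reference_element, expansions

    ref_el = reference_element.DefaultTriangle()
    es = expansions.get_expansion_set(ref_el)       # -> TriangleExpansionSet
    pts = [(0.0, 0.0), (-0.5, -0.5)]
    vals = es.tabulate(2, pts)                      # rows: basis functions, cols: points
    assert vals.shape == (es.get_num_members(2), len(pts))   # (6, 2) for degree 2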
FIAT/finite_element.py
    FiniteElement: base class storing the reference element, dual set, order, form degree
    and mapping, with accessors such as dual_basis(), entity_dofs(), entity_closure_dofs()
    and space_dimension(); its tabulate() is abstract and is_nodal() returns False.
    CiarletElement: nodal elements built from a PolynomialSet; the constructor applies the
    dual functionals to the prime basis (dual.to_riesz), inverts the resulting generalized
    Vandermonde matrix and stores the nodal PolynomialSet, and tabulate(order, points,
    entity=None) maps the points through the entity transform before tabulating them.
    entity_support_dofs(elem, entity_dim) integrates squared basis values over each entity
    with a quadrature rule of degree max(2*degree, 1) and keeps the dofs whose integral
    exceeds a 1e-8 tolerance (results are cached on the element).
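The tables returned by tabulate() are keyed by derivative multi-indices. A short sketch
using mis from FIAT.polynomial_set (the same helper hdiv_trace.py uses further down); the
ordering of the tuples within one derivative order is an assumption here.

    from FIAT.polynomial_set import mis

    dim, order = 2, 1
    keys = [alpha for i in range(order + 1) for alpha in mis(dim, i)]
    # The order-0 key (0, 0) holds the basis values; order-1 keys such as (1, 0) and
    # (0, 1) hold the x- and y-derivatives.
    print(keys)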
FIAT/functional.py
    Functional: abstract functional written as a weighted sum of (components of) its
    argument at particular points; pt_dict maps point -> [(weight, component)], deriv_dict
    does the same for derivative functionals, and a generic to_riesz() expresses the
    functional against a polynomial expansion set. Concrete functionals defined here:
    PointEvaluation, ComponentPointEvaluation, PointDerivative, PointNormalDerivative,
    IntegralMoment, IntegralMomentOfNormalDerivative, FrobeniusIntegralMoment,
    PointNormalEvaluation, PointEdgeTangentEvaluation, PointFaceTangentEvaluation,
    PointScaledNormalEvaluation and PointwiseInnerProductEvaluation.
FIAT/gauss_legendre.py
    GaussLegendreDualSet / GaussLegendre: 1D discontinuous element of degree n with
    point-evaluation nodes at the n+1 Gauss-Legendre quadrature points; only defined on
    the LINE reference cell.

FIAT/gauss_lobatto_legendre.py
    GaussLobattoLegendreDualSet / GaussLobattoLegendre: 1D continuous element of degree n
    with point-evaluation nodes at the Gauss-Lobatto-Legendre points (the two endpoints
    plus n-1 interior points); only defined on the LINE reference cell.
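A usage sketch for the two 1D elements above, assuming FIAT is importable: a degree-3
Gauss-Lobatto-Legendre element has four nodal basis functions, so an order-0 tabulation
at three points gives a 4 x 3 table under the (0,) key.

    from FIAT import reference_element, gauss_lobatto_legendre

    line = reference_element.ufc_simplex(1)                       # a LINE cell
    elem = gauss_lobatto_legendre.GaussLobattoLegendre(line, 3)   # degree 3 -> 4 dofs
    table = elem.tabulate(0, [(0.0,), (0.5,), (1.0,)])
    print(sorted(table.keys()))        # [(0,)]
    print(table[(0,)].shape)           # (4, 3)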
FIAT/hdiv_trace.py
    HDivTrace: stand-alone "DG facet" element representing the trace of an H(div) element.
    The constructor builds a discontinuous element on every codimension-1 facet (tensor
    product cells may pass a tuple of per-cell degrees), numbers the facet dofs
    consecutively and uses point-evaluation nodes mapped onto the facets. tabulate() is
    only well defined on facets: with entity=None the facet is identified from barycentric
    coordinates (simplices only, tolerance 1e-10) and unidentifiable points yield
    numpy.nan tables, while cell-wise or derivative tabulations are flagged with
    TraceError. [Entry truncated at this point in the log excerpt.]
entity_dim not in self.dg_elements:\n                for key in phivals:\n                    msg = \"The HDivTrace element can only be tabulated on facets.\"\n                    phivals[key] = TraceError(msg)\n\n                return phivals\n\n            else:\n                # Retrieve function evaluations (order = 0 case)\n                offset = 0\n                for facet_dim in sorted(self.dg_elements):\n                    element = self.dg_elements[facet_dim]\n                    nf = element.space_dimension()\n                    num_facets = len(self.ref_el.get_topology()[facet_dim])\n\n                    # Loop over the number of facets until we find a facet\n                    # with matching dimension and id\n                    for i in range(num_facets):\n                        # Found it! Grab insertion indices\n                        if (facet_dim, i) == entity:\n                            nonzerovals, = element.tabulate(0, points).values()\n                            indices = slice(offset, offset + nf)\n\n                        offset += nf\n\n        # If asking for gradient evaluations, insert TraceError in\n        # gradient slots\n        if order > 0:\n            msg = \"Gradients on trace elements are not well-defined.\"\n            for key in phivals:\n                if key != evalkey:\n                    phivals[key] = TraceError(msg)\n\n        # Insert non-zero values in appropriate place\n        phivals[evalkey][indices, :] = nonzerovals\n\n        return phivals\n\n    def value_shape(self):\n        \"\"\"Return the value shape of the finite element functions.\"\"\"\n        return ()\n\n    def dmats(self):\n        \"\"\"Return dmats: expansion coefficients for basis function\n        derivatives.\"\"\"\n        raise NotImplementedError(\"dmats not implemented for the trace element.\")\n\n    def get_num_members(self, arg):\n        \"\"\"Return number of members of the expansion set.\"\"\"\n        raise NotImplementedError(\"get_num_members not implemented for the trace element.\")\n\n    @staticmethod\n    def is_nodal():\n        return True\n\n\ndef construct_dg_element(ref_el, degree):\n    \"\"\"Constructs a discontinuous galerkin element of a given degree\n    on a particular reference cell.\n    \"\"\"\n    if ref_el.get_shape() in [LINE, TRIANGLE]:\n        dg_element = DiscontinuousLagrange(ref_el, degree)\n\n    # Quadrilateral facets could be on a FiredrakeQuadrilateral.\n    # In this case, we treat this as an interval x interval cell:\n    elif ref_el.get_shape() == QUADRILATERAL:\n        dg_a = DiscontinuousLagrange(ufc_simplex(1), degree)\n        dg_b = DiscontinuousLagrange(ufc_simplex(1), degree)\n        dg_element = TensorProductElement(dg_a, dg_b)\n\n    # This handles the more general case for facets:\n    elif ref_el.get_shape() == TENSORPRODUCT:\n        assert len(degree) == len(ref_el.cells), (\n            \"Must provide the same number of degrees as the number \"\n            \"of cells that make up the tensor product cell.\"\n        )\n        sub_elements = [construct_dg_element(c, d)\n                        for c, d in zip(ref_el.cells, degree)\n                        if c.get_shape() != POINT]\n\n        if len(sub_elements) > 1:\n            dg_element = TensorProductElement(*sub_elements)\n        else:\n            dg_element, = sub_elements\n\n    else:\n        raise NotImplementedError(\n            \"Reference cells of type %s not currently supported\" % type(ref_el)\n        )\n\n    return 
dg_element\n\n\n# The following functions are credited to Marie E. Rognes:\ndef extract_unique_facet(coordinates, tolerance=epsilon):\n    \"\"\"Determines whether a set of points (described in its barycentric coordinates)\n    are all on one of the facet sub-entities, and return the particular facet and\n    whether the search has been successful.\n\n    :arg coordinates: A set of points described in barycentric coordinates.\n    :arg tolerance: A fixed tolerance for geometric identifications.\n    \"\"\"\n    facets = []\n    for c in coordinates:\n        on_facet = set([i for (i, l) in enumerate(c) if abs(l) < tolerance])\n        facets += [on_facet]\n\n    unique_facet = facets[0]\n    for f in facets:\n        unique_facet = unique_facet & f\n\n    # Handle coordinates not on facets\n    if len(unique_facet) != 1:\n        return (None, False)\n\n    # If we have a unique facet, return it and success\n    return (unique_facet.pop(), True)\n\n\ndef barycentric_coordinates(points, vertices):\n    \"\"\"Computes the barycentric coordinates for a set of points relative to a\n    simplex defined by a set of vertices.\n\n    :arg points: A set of points.\n    :arg vertices: A set of vertices that define the simplex.\n    \"\"\"\n\n    # Form mapping matrix\n    last = np.asarray(vertices[-1])\n    T = np.matrix([np.array(v) - last for v in vertices[:-1]]).T\n    invT = np.linalg.inv(T)\n\n    # Compute the barycentric coordinates for all points\n    coords = []\n    for p in points:\n        y = np.asarray(p) - last\n        bary = invT.dot(y.T)\n        bary = [bary[(0, i)] for i in range(len(y))]\n        bary += [1.0 - sum(bary)]\n        coords.append(bary)\n    return coords\n\n\ndef map_from_reference_facet(point, vertices):\n    \"\"\"Evaluates the physical coordinate of a point using barycentric\n    coordinates.\n\n    :arg point: The reference points to be mapped to the facet.\n    :arg vertices: The vertices defining the physical element.\n    \"\"\"\n\n    # Compute the barycentric coordinates of the point relative to the reference facet\n    reference_simplex = ufc_simplex(len(vertices) - 1)\n    reference_vertices = reference_simplex.get_vertices()\n    coords = barycentric_coordinates([point, ], reference_vertices)[0]\n\n    # Evaluates the physical coordinate of the point using barycentric coordinates\n    point = sum(vertices[j] * coords[j] for j in range(len(coords)))\n    return tuple(point)\n\n\ndef map_to_reference_facet(points, vertices, facet):\n    \"\"\"Given a set of points and vertices describing a facet of a simplex in n-dimensional\n    coordinates (where the points lie on the facet), map the points to the reference simplex\n    of dimension (n-1).\n\n    :arg points: A set of points in n-D.\n    :arg vertices: A set of vertices describing a facet of a simplex in n-D.\n    :arg facet: Integer representing the facet number.\n    \"\"\"\n\n    # Compute the barycentric coordinates of the points with respect to the\n    # full physical simplex\n    all_coords = barycentric_coordinates(points, vertices)\n\n    # Extract vertices of the reference facet\n    reference_facet_simplex = ufc_simplex(len(vertices) - 2)\n    reference_vertices = reference_facet_simplex.get_vertices()\n\n    reference_points = []\n    for (i, coords) in enumerate(all_coords):\n        # Extract the correct subset of barycentric coordinates since we know\n        # which facet we are on\n        new_coords = [coords[j] for j in range(len(coords)) if j != facet]\n\n        # Evaluate the 
reference coordinate of a point in barycentric coordinates\n        reference_pt = sum(np.asarray(reference_vertices[j]) * new_coords[j]\n                           for j in range(len(new_coords)))\n\n        reference_points += [reference_pt]\n    return reference_points\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/hdiv_trace.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, null, null, 1, 1, 0, null, 1, 0, 1, 1, 0, null, 1, null, null, 1, null, 1, 1, null, null, 1, null, null, 1, null, null, 1, null, null, 1, null, 1, null, null, null, null, 1, 1, 1, 1, 1, null, 1, 1, null, null, null, null, 1, null, null, null, null, null, null, 1, 1, null, 1, null, 0, null, 1, 1, 1, null, null, null, 1, null, 1, 0, null, 0, null, 0, null, 1, 1, null, null, null, null, 1, 1, 1, 0, 1, null, 1, 1, 1, 0, 0, 0, 0, 0, null, 0, 0, 0, null, 0, 1, 1, null, 1, null, null, null, 1, 1, 1, 1, 1, 1, null, 0, 1, null, 1, null, null, 1, 1, 0, null, 1, 0, 1, 1, 0, null, 1, null, null, 1, null, 1, 1, null, null, 1, null, null, 1, null, null, 1, null, null, 1, null, 1, null, null, null, null, 1, 1, 1, 1, 1, null, 1, 1, null, null, null, null, 1, null, null, null, null, null, null, 1, 1, null, 1, null, 0, null, 1, 1, 1, null, null, null, 1, null, 1, 0, null, 0, null, 0, null, 1, 1, null, null, null, null, 1, 1, 1, 0, 1, null, 1, 1, 1, 0, 0, 0, 0, 0, null, 0, 0, 0, null, 0, 1, 1, null, 1, null, null, null, 1, 1, 1, 1, 1, 1, null, 0, 1, null, 1], "source": "# Copyright (C) 2013 Andrew T. T. McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport numpy\nimport types\nfrom FIAT.tensor_product import TensorProductElement\nfrom FIAT import functional\n\n\ndef Hdiv(element):\n    if not isinstance(element, TensorProductElement):\n        raise NotImplementedError\n\n    if element.A.get_formdegree() is None or element.B.get_formdegree() is None:\n        raise ValueError(\"form degree of sub-element was None (not set during initialisation), Hdiv cannot be done without this information\")\n    formdegree = element.A.get_formdegree() + element.B.get_formdegree()\n    if formdegree != element.get_reference_element().get_spatial_dimension() - 1:\n        raise ValueError(\"Tried to use Hdiv on a non-(n-1)-form element\")\n\n    newelement = TensorProductElement(element.A, element.B)  # make a copy to return\n\n    # redefine value_shape()\n    def value_shape(self):\n        \"Return the value shape of the finite element functions.\"\n        return (self.get_reference_element().get_spatial_dimension(),)\n    newelement.value_shape = types.MethodType(value_shape, newelement)\n\n    # store old _mapping\n    newelement._oldmapping = newelement._mapping\n\n    # redefine _mapping\n    newelement._mapping = \"contravariant piola\"\n\n    # store formdegree\n    newelement.formdegree = formdegree\n\n    # redefine tabulate\n    newelement.old_tabulate = newelement.tabulate\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Return tabulated values of derivatives up to given order of\n        basis functions at given points.\"\"\"\n\n        # don't duplicate what the old function does fine...\n        old_result = self.old_tabulate(order, points, entity)\n        new_result = {}\n        sd = self.get_reference_element().get_spatial_dimension()\n        for alpha in old_result.keys():\n            temp_old = old_result[alpha]\n\n            if self._oldmapping == \"affine\":\n                temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[1]), dtype=temp_old.dtype)\n                # both constituents affine, i.e., they were 0 forms or n-forms.\n                # to sum to n-1, we must have \"0-form on an interval\" crossed\n                # with something discontinuous.\n                # look for the (continuous) 0-form, and put the value there\n                if self.A.get_formdegree() == 0:\n                    # first element, so (-x, 0, ...)\n                    # Sign flip to ensure that a positive value of the node\n                    # means a vector field having a direction \"to the left\"\n                    # relative to direction in which the nodes are placed on an\n                    # edge in case of higher-order schemes.\n                    # This is required for unstructured quadrilateral meshes.\n                    temp[:, 0, :] = -temp_old[:, :]\n                elif self.B.get_formdegree() == 0:\n                    # second element, so (..., 0, x)\n                    temp[:, -1, :] = temp_old[:, :]\n                else:\n                    raise Exception(\"Hdiv affine/affine form degrees broke\")\n\n            elif self._oldmapping == \"contravariant piola\":\n                temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)\n                Asd = self.A.get_reference_element().get_spatial_dimension()\n                # one component is affine, one is contravariant piola\n                # the affine one must be an n-form, hence discontinuous\n                # this component/these components get 
zeroed out\n                if element.A.mapping()[0] == \"contravariant piola\":\n                    # first element, so (x1, ..., xn, 0, ...)\n                    temp[:, :Asd, :] = temp_old[:, :, :]\n                elif element.B.mapping()[0] == \"contravariant piola\":\n                    # second element, so (..., 0, x1, ..., xn)\n                    temp[:, Asd:, :] = temp_old[:, :, :]\n                else:\n                    raise ValueError(\"Hdiv contravariant piola couldn't find an existing ConPi subelement\")\n\n            elif self._oldmapping == \"covariant piola\":\n                temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)\n                # one component is affine, one is covariant piola\n                # the affine one must be an n-form, hence discontinuous\n                # this component/these components get zeroed out\n                # the remaining part gets perped\n                if element.A.mapping()[0] == \"covariant piola\":\n                    Asd = self.A.get_reference_element().get_spatial_dimension()\n                    if not Asd == 2:\n                        raise ValueError(\"Must be 2d shape to automatically convert covariant to contravariant\")\n                    temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)\n                    # first element, so (x2, -x1, 0, ...)\n                    temp_perp[:, 0, :] = temp_old[:, 1, :]\n                    temp_perp[:, 1, :] = -temp_old[:, 0, :]\n                    temp[:, :Asd, :] = temp_perp[:, :, :]\n                elif element.B.mapping()[0] == \"covariant piola\":\n                    Bsd = self.B.get_reference_element().get_spatial_dimension()\n                    if not Bsd == 2:\n                        raise ValueError(\"Must be 2d shape to automatically convert covariant to contravariant\")\n                    temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)\n                    # second element, so (..., 0, x2, -x1)\n                    temp_perp[:, 0, :] = temp_old[:, 1, :]\n                    temp_perp[:, 1, :] = -temp_old[:, 0, :]\n                    temp[:, Asd:, :] = temp_old[:, :, :]\n                else:\n                    raise ValueError(\"Hdiv covariant piola couldn't find an existing CovPi subelement\")\n            new_result[alpha] = temp\n        return new_result\n\n    newelement.tabulate = types.MethodType(tabulate, newelement)\n\n    # splat any PointEvaluation functionals.\n    # they become a nasty mix of internal and external component DOFs\n    if newelement._oldmapping == \"affine\":\n        oldnodes = newelement.dual.nodes\n        newnodes = []\n        for node in oldnodes:\n            if isinstance(node, functional.PointEvaluation):\n                newnodes.append(functional.Functional(None, None, None, {}, \"Undefined\"))\n            else:\n                newnodes.append(node)\n        newelement.dual.nodes = newnodes\n\n    return newelement\n\n\ndef Hcurl(element):\n    if not isinstance(element, TensorProductElement):\n        raise NotImplementedError\n\n    if element.A.get_formdegree() is None or element.B.get_formdegree() is None:\n        raise ValueError(\"form degree of sub-element was None (not set during initialisation), Hcurl cannot be done without this information\")\n    formdegree = element.A.get_formdegree() + element.B.get_formdegree()\n    if not (formdegree == 1):\n        raise ValueError(\"Tried to use Hcurl on a non-1-form element\")\n\n    newelement = 
TensorProductElement(element.A, element.B)  # make a copy to return\n\n    # redefine value_shape()\n    def value_shape(self):\n        \"Return the value shape of the finite element functions.\"\n        return (self.get_reference_element().get_spatial_dimension(),)\n    newelement.value_shape = types.MethodType(value_shape, newelement)\n\n    # store old _mapping\n    newelement._oldmapping = newelement._mapping\n\n    # redefine _mapping\n    newelement._mapping = \"covariant piola\"\n\n    # store formdegree\n    newelement.formdegree = formdegree\n\n    # redefine tabulate\n    newelement.old_tabulate = newelement.tabulate\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Return tabulated values of derivatives up to given order of\n        basis functions at given points.\"\"\"\n\n        # don't duplicate what the old function does fine...\n        old_result = self.old_tabulate(order, points, entity)\n        new_result = {}\n        sd = self.get_reference_element().get_spatial_dimension()\n        for alpha in old_result.keys():\n            temp_old = old_result[alpha]\n\n            if self._oldmapping == \"affine\":\n                temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[1]), dtype=temp_old.dtype)\n                # both constituents affine, i.e., they were 0 forms or n-forms.\n                # to sum to 1, we must have \"1-form on an interval\" crossed with\n                # a bunch of 0-forms (continuous).\n                # look for the 1-form, and put the value in the other place\n                if self.A.get_formdegree() == 1:\n                    # first element, so (x, 0, ...)\n                    # No sign flip here, nor at the other branch, to ensure that\n                    # a positive value of the node means a vector field having\n                    # the same direction as the direction in which the nodes are\n                    # placed on an edge in case of higher-order schemes.\n                    # This is required for unstructured quadrilateral meshes.\n                    temp[:, 0, :] = temp_old[:, :]\n                elif self.B.get_formdegree() == 1:\n                    # second element, so (..., 0, x)\n                    temp[:, -1, :] = temp_old[:, :]\n                else:\n                    raise Exception(\"Hcurl affine/affine form degrees broke\")\n\n            elif self._oldmapping == \"covariant piola\":\n                temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)\n                Asd = self.A.get_reference_element().get_spatial_dimension()\n                # one component is affine, one is covariant piola\n                # the affine one must be an 0-form, hence continuous\n                # this component/these components get zeroed out\n                if element.A.mapping()[0] == \"covariant piola\":\n                    # first element, so (x1, ..., xn, 0, ...)\n                    temp[:, :Asd, :] = temp_old[:, :, :]\n                elif element.B.mapping()[0] == \"covariant piola\":\n                    # second element, so (..., 0, x1, ..., xn)\n                    temp[:, Asd:, :] = temp_old[:, :, :]\n                else:\n                    raise ValueError(\"Hdiv contravariant piola couldn't find an existing ConPi subelement\")\n\n            elif self._oldmapping == \"contravariant piola\":\n                temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)\n                # one component is affine, one is 
contravariant piola\n                # the affine one must be an 0-form, hence continuous\n                # this component/these components get zeroed out\n                # the remaining part gets perped\n                if element.A.mapping()[0] == \"contravariant piola\":\n                    Asd = self.A.get_reference_element().get_spatial_dimension()\n                    if not Asd == 2:\n                        raise ValueError(\"Must be 2d shape to automatically convert contravariant to covariant\")\n                    temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)\n                    # first element, so (-x2, x1, 0, ...)\n                    temp_perp[:, 0, :] = -temp_old[:, 1, :]\n                    temp_perp[:, 1, :] = temp_old[:, 0, :]\n                    temp[:, :Asd, :] = temp_perp[:, :, :]\n                elif element.B.mapping()[0] == \"contravariant piola\":\n                    Bsd = self.B.get_reference_element().get_spatial_dimension()\n                    if not Bsd == 2:\n                        raise ValueError(\"Must be 2d shape to automatically convert contravariant to covariant\")\n                    temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)\n                    # second element, so (..., 0, -x2, x1)\n                    temp_perp[:, 0, :] = -temp_old[:, 1, :]\n                    temp_perp[:, 1, :] = temp_old[:, 0, :]\n                    temp[:, Asd:, :] = temp_old[:, :, :]\n                else:\n                    raise ValueError(\"Hcurl contravariant piola couldn't find an existing CovPi subelement\")\n            new_result[alpha] = temp\n        return new_result\n\n    newelement.tabulate = types.MethodType(tabulate, newelement)\n\n    # splat any PointEvaluation functionals.\n    # they become a nasty mix of internal and external component DOFs\n    if newelement._oldmapping == \"affine\":\n        oldnodes = newelement.dual.nodes\n        newnodes = []\n        for node in oldnodes:\n            if isinstance(node, functional.PointEvaluation):\n                newnodes.append(functional.Functional(None, None, None, {}, \"Undefined\"))\n            else:\n                newnodes.append(node)\n        newelement.dual.nodes = newnodes\n\n    return newelement\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/hdivcurl.py"}, {"coverage": [null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, null, null, 1, null, 1, 1, 1, 0, null, null, null, 1, null, null, 1, null, null, 1, null, 1, 1, 1, null, 1, 1, 1, null, 1, null, 1, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, 1, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, null, null, 1, 1, null, 1, null, 1, null, 1, null, 1, null], "source": "# -*- coding: utf-8 -*-\n\"\"\"Implementation of the Hellan-Herrmann-Johnson finite elements.\"\"\"\n\n# Copyright (C) 2016-2018 Lizao Li <lzlarryli@gmail.com>\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT.finite_element import CiarletElement\nfrom FIAT.dual_set import DualSet\nfrom FIAT.polynomial_set import ONSymTensorPolynomialSet\nfrom FIAT.functional import PointwiseInnerProductEvaluation as InnerProduct\nimport numpy\n\n\nclass HellanHerrmannJohnsonDual(DualSet):\n    \"\"\"Degrees of freedom for Hellan-Herrmann-Johnson elements.\"\"\"\n    def __init__(self, cell, degree):\n        dim = cell.get_spatial_dimension()\n        if not dim == 2:\n            raise ValueError(\"Hellan_Herrmann-Johnson elements are only\"\n                             \"defined in dimension 2.\")\n\n        # construct the degrees of freedoms\n        dofs = []               # list of functionals\n        # dof_ids[i][j] contains the indices of dofs that are associated with\n        # entity j in dim i\n        dof_ids = {}\n\n        # no vertex dof\n        dof_ids[0] = {i: [] for i in range(dim + 1)}\n        # edge dofs\n        (_dofs, _dof_ids) = self._generate_edge_dofs(cell, degree, 0)\n        dofs.extend(_dofs)\n        dof_ids[1] = _dof_ids\n        # cell dofs\n        (_dofs, _dof_ids) = self._generate_trig_dofs(cell, degree, len(dofs))\n        dofs.extend(_dofs)\n        dof_ids[dim] = _dof_ids\n\n        super(HellanHerrmannJohnsonDual, self).__init__(dofs, cell, dof_ids)\n\n    @staticmethod\n    def _generate_edge_dofs(cell, degree, offset):\n        \"\"\"generate dofs on edges.\n        On each edge, let n be its normal. For degree=r, the scalar function\n              n^T u n\n        is evaluated at points enough to control P(r).\n        \"\"\"\n        dofs = []\n        dof_ids = {}\n        for entity_id in range(3):                  # a triangle has 3 edges\n            pts = cell.make_points(1, entity_id, degree + 2)  # edges are 1D\n            normal = cell.compute_scaled_normal(entity_id)\n            dofs += [InnerProduct(cell, normal, normal, pt) for pt in pts]\n            num_new_dofs = len(pts)                 # 1 dof per point on edge\n            dof_ids[entity_id] = list(range(offset, offset + num_new_dofs))\n            offset += num_new_dofs\n        return (dofs, dof_ids)\n\n    @staticmethod\n    def _generate_trig_dofs(cell, degree, offset):\n        \"\"\"generate dofs on edges.\n        On each triangle, for degree=r, the three components\n              u11, u12, u22\n        are evaluated at points enough to control P(r-1).\n        \"\"\"\n        dofs = []\n        dof_ids = {}\n        pts = cell.make_points(2, 0, degree + 2)  # 2D trig #0\n        e1 = numpy.array([1.0, 0.0])              # euclidean basis 1\n        e2 = numpy.array([0.0, 1.0])              # euclidean basis 2\n        basis = [(e1, e1), (e1, e2), (e2, e2)]    # basis for symmetric matrix\n        for (v1, v2) in basis:\n            dofs += [InnerProduct(cell, v1, v2, pt) for pt in pts]\n        num_dofs = 3 * len(pts)                   # 3 dofs per trig\n        dof_ids[0] = list(range(offset, offset + num_dofs))\n        return (dofs, dof_ids)\n\n\nclass HellanHerrmannJohnson(CiarletElement):\n    \"\"\"The definition of Hellan-Herrmann-Johnson element. It is defined only in\n       dimension 2. 
It consists of piecewise polynomial symmetric-matrix-valued\n       functions of degree r or less with normal-normal continuity.\n    \"\"\"\n    def __init__(self, cell, degree):\n        assert degree >= 0, \"Hellan-Herrmann-Johnson starts at degree 0!\"\n        # shape functions\n        Ps = ONSymTensorPolynomialSet(cell, degree)\n        # degrees of freedom\n        Ls = HellanHerrmannJohnsonDual(cell, degree)\n        # mapping under affine transformation\n        mapping = \"double contravariant piola\"\n\n        super(HellanHerrmannJohnson, self).__init__(Ps, Ls, degree,\n                                                    mapping=mapping)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/hellan_herrmann_johnson.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, null, null, null, null, 1, 1, 1, 1, null, null, null, 1, 1, 1, null, null, null, 1, 1, 1, 1, 1, 1, 1, null, 1, null, 1, 1, null, null, null, null, null, 1, 1, 1, 1, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 1], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n# Modified 2017 by RCK\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional\n\n\nclass CubicHermiteDualSet(dual_set.DualSet):\n    \"\"\"The dual basis for Lagrange elements.  This class works for\n    simplices of any dimension.  
Nodes are point evaluation at\n    equispaced points.\"\"\"\n\n    def __init__(self, ref_el):\n        entity_ids = {}\n        nodes = []\n        cur = 0\n\n        # make nodes by getting points\n        # need to do this dimension-by-dimension, facet-by-facet\n        top = ref_el.get_topology()\n        verts = ref_el.get_vertices()\n        sd = ref_el.get_spatial_dimension()\n\n        # get jet at each vertex\n\n        entity_ids[0] = {}\n        for v in sorted(top[0]):\n            nodes.append(functional.PointEvaluation(ref_el, verts[v]))\n            pd = functional.PointDerivative\n            for i in range(sd):\n                alpha = [0] * sd\n                alpha[i] = 1\n\n                nodes.append(pd(ref_el, verts[v], alpha))\n\n            entity_ids[0][v] = list(range(cur, cur + 1 + sd))\n            cur += sd + 1\n\n        # now only have dofs at the barycenter, which is the\n        # maximal dimension\n        # no edge dof\n\n        entity_ids[1] = {}\n        for i in top[1]:\n            entity_ids\n            entity_ids[1][i] = []\n\n        if sd > 1:\n            # face dof\n            # point evaluation at barycenter\n            entity_ids[2] = {}\n            for f in sorted(top[2]):\n                pt = ref_el.make_points(2, f, 3)[0]\n                n = functional.PointEvaluation(ref_el, pt)\n                nodes.append(n)\n                entity_ids[2][f] = list(range(cur, cur + 1))\n                cur += 1\n\n            for dim in range(3, sd + 1):\n                entity_ids[dim] = {}\n                for facet in top[dim]:\n                    entity_ids[dim][facet] = []\n\n        super(CubicHermiteDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass CubicHermite(finite_element.CiarletElement):\n    \"\"\"The cubic Hermite finite element.  It is what it is.\"\"\"\n\n    def __init__(self, ref_el):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, 3)\n        dual = CubicHermiteDualSet(ref_el)\n        super(CubicHermite, self).__init__(poly_set, dual, 3)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/hermite.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, null, null, 1, null, null, 1, null, null, null, null, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, null, null, null, 1, null, 1, 1, null, 1, null, 1, 1, null, 1, 1, 1, 1, 1, null, null, 1, null, 1, 1, 1, 1, null, 1, null, null, 1, null, 1, 0, null, 1, null, null, 1, null, null, null, null, null, 1, 1, 0, null, 1, 1, 1, 1], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"Several functions related to the one-dimensional jacobi polynomials:\nEvaluation, evaluation of derivatives, plus computation of the roots\nvia Newton's method.  These mainly are used in defining the expansion\nfunctions over the simplices and in defining quadrature\nrules over each domain.\"\"\"\n\nimport numpy\n\n\ndef eval_jacobi(a, b, n, x):\n    \"\"\"Evaluates the nth jacobi polynomial with weight parameters a,b at a\n    point x. Recurrence relations implemented from the pseudocode\n    given in Karniadakis and Sherwin, Appendix B\"\"\"\n\n    if 0 == n:\n        return 1.0\n    elif 1 == n:\n        return 0.5 * (a - b + (a + b + 2.0) * x)\n    else:  # 2 <= n\n        apb = a + b\n        pn2 = 1.0\n        pn1 = 0.5 * (a - b + (apb + 2.0) * x)\n        p = 0\n        for k in range(2, n + 1):\n            a1 = 2.0 * k * (k + apb) * (2.0 * k + apb - 2.0)\n            a2 = (2.0 * k + apb - 1.0) * (a * a - b * b)\n            a3 = (2.0 * k + apb - 2.0)  \\\n                * (2.0 * k + apb - 1.0) \\\n                * (2.0 * k + apb)\n            a4 = 2.0 * (k + a - 1.0) * (k + b - 1.0) \\\n                * (2.0 * k + apb)\n            a2 = a2 / a1\n            a3 = a3 / a1\n            a4 = a4 / a1\n            p = (a2 + a3 * x) * pn1 - a4 * pn2\n            pn2 = pn1\n            pn1 = p\n        return p\n\n\ndef eval_jacobi_batch(a, b, n, xs):\n    \"\"\"Evaluates all jacobi polynomials with weights a,b\n    up to degree n.  xs is a numpy.array of points.\n    Returns a two-dimensional array of points, where the\n    rows correspond to the Jacobi polynomials and the\n    columns correspond to the points.\"\"\"\n    result = numpy.zeros((n + 1, len(xs)), xs.dtype)\n    # hack to make sure AD type is propogated through\n    for ii in range(result.shape[1]):\n        result[0, ii] = 1.0 + xs[ii, 0] - xs[ii, 0]\n\n    xsnew = xs.reshape((-1,))\n\n    if n > 0:\n        result[1, :] = 0.5 * (a - b + (a + b + 2.0) * xsnew)\n\n        apb = a + b\n        for k in range(2, n + 1):\n            a1 = 2.0 * k * (k + apb) * (2.0 * k + apb - 2.0)\n            a2 = (2.0 * k + apb - 1.0) * (a * a - b * b)\n            a3 = (2.0 * k + apb - 2.0)  \\\n                * (2.0 * k + apb - 1.0) \\\n                * (2.0 * k + apb)\n            a4 = 2.0 * (k + a - 1.0) * (k + b - 1.0) \\\n                * (2.0 * k + apb)\n            a2 = a2 / a1\n            a3 = a3 / a1\n            a4 = a4 / a1\n            result[k, :] = (a2 + a3 * xsnew) * result[k-1, :] \\\n                - a4 * result[k-2, :]\n    return result\n\n\ndef eval_jacobi_deriv(a, b, n, x):\n    \"\"\"Evaluates the first derivative of P_{n}^{a,b} at a point x.\"\"\"\n    if n == 0:\n        return 0.0\n    else:\n        return 0.5 * (a + b + n + 1) * eval_jacobi(a + 1, b + 1, n - 1, x)\n\n\ndef eval_jacobi_deriv_batch(a, b, n, xs):\n    \"\"\"Evaluates the first derivatives of all jacobi polynomials with\n    weights a,b up to degree n.  
xs is a numpy.array of points.\n    Returns a two-dimensional array of points, where the\n    rows correspond to the Jacobi polynomials and the\n    columns correspond to the points.\"\"\"\n    results = numpy.zeros((n + 1, len(xs)), \"d\")\n    if n == 0:\n        return results\n    else:\n        results[1:, :] = eval_jacobi_batch(a + 1, b + 1, n - 1, xs)\n    for j in range(1, n + 1):\n        results[j, :] *= 0.5 * (a + b + j + 1)\n    return results\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/jacobi.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, null, null, null, null, 1, 1, 1, null, null, null, 1, null, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 1, 1], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n# Modified by Andrew T. T. McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional\n\n\nclass LagrangeDualSet(dual_set.DualSet):\n    \"\"\"The dual basis for Lagrange elements.  This class works for\n    simplices of any dimension.  Nodes are point evaluation at\n    equispaced points.\"\"\"\n\n    def __init__(self, ref_el, degree):\n        entity_ids = {}\n        nodes = []\n\n        # make nodes by getting points\n        # need to do this dimension-by-dimension, facet-by-facet\n        top = ref_el.get_topology()\n\n        cur = 0\n        for dim in sorted(top):\n            entity_ids[dim] = {}\n            for entity in sorted(top[dim]):\n                pts_cur = ref_el.make_points(dim, entity, degree)\n                nodes_cur = [functional.PointEvaluation(ref_el, x)\n                             for x in pts_cur]\n                nnodes_cur = len(nodes_cur)\n                nodes += nodes_cur\n                entity_ids[dim][entity] = list(range(cur, cur + nnodes_cur))\n                cur += nnodes_cur\n\n        super(LagrangeDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass Lagrange(finite_element.CiarletElement):\n    \"\"\"The Lagrange finite element.  
It is what it is.\"\"\"\n\n    def __init__(self, ref_el, degree):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)\n        dual = LagrangeDualSet(ref_el, degree)\n        formdegree = 0  # 0-form\n        super(Lagrange, self).__init__(poly_set, dual, degree, formdegree)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/lagrange.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, null, 1, 1, null, null, 1, null, null, null, null, null, null, null, 1, 1, null, 1, 1, 0, 1, null, null, null, null, null, 1, null, 1, null, 1, 1, 1, null, 1, 0, null, 1, 0, null, 1, 0, null, 1, 0, null, 1, 1, null, 1, null, null, null, 0, null, 0, null, 0, 0, null, 0, 0, null, 0, 0, null, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, null, 0, null, 1, null, 1, null, null, 1, null, null, null, 1, null, 1, null, 1, 1, 1, 1, 1], "source": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2005-2010 Anders Logg\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nimport numpy\n\nfrom operator import add\nfrom functools import partial\n\nfrom FIAT.dual_set import DualSet\nfrom FIAT.finite_element import FiniteElement\n\n\nclass MixedElement(FiniteElement):\n    \"\"\"A FIAT-like representation of a mixed element.\n\n    :arg elements: An iterable of FIAT elements.\n    :arg ref_el: The reference element (optional).\n\n    This object offers tabulation of the concatenated basis function\n    tables along with an entity_dofs dict.\"\"\"\n    def __init__(self, elements, ref_el=None):\n        elements = tuple(elements)\n\n        cells = set(e.get_reference_element() for e in elements)\n        if ref_el is not None:\n            cells.add(ref_el)\n        ref_el, = cells\n\n        # These functionals are absolutely wrong, they all map from\n        # functions of the wrong shape, and potentially of different\n        # shapes.  However, they are wrong precisely as FFC hacks\n        # expect them to be. 
:(\n        nodes = [L for e in elements for L in e.dual_basis()]\n\n        entity_dofs = concatenate_entity_dofs(ref_el, elements)\n\n        dual = DualSet(nodes, ref_el, entity_dofs)\n        super(MixedElement, self).__init__(ref_el, dual, None, mapping=None)\n        self._elements = elements\n\n    def elements(self):\n        return self._elements\n\n    def num_sub_elements(self):\n        return len(self._elements)\n\n    def value_shape(self):\n        return (sum(numpy.prod(e.value_shape(), dtype=int) for e in self.elements()), )\n\n    def mapping(self):\n        return [m for e in self._elements for m in e.mapping()]\n\n    def get_nodal_basis(self):\n        raise NotImplementedError(\"get_nodal_basis not implemented\")\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Tabulate a mixed element by appropriately splatting\n        together the tabulation of the individual elements.\n        \"\"\"\n        shape = (self.space_dimension(),) + self.value_shape() + (len(points),)\n\n        output = {}\n\n        sub_dims = [0] + list(e.space_dimension() for e in self.elements())\n        sub_cmps = [0] + list(numpy.prod(e.value_shape(), dtype=int)\n                              for e in self.elements())\n        irange = numpy.cumsum(sub_dims)\n        crange = numpy.cumsum(sub_cmps)\n\n        for i, e in enumerate(self.elements()):\n            table = e.tabulate(order, points, entity)\n\n            for d, tab in table.items():\n                try:\n                    arr = output[d]\n                except KeyError:\n                    arr = numpy.zeros(shape, dtype=tab.dtype)\n                    output[d] = arr\n\n                ir = irange[i:i+2]\n                cr = crange[i:i+2]\n                tab = tab.reshape(ir[1] - ir[0], cr[1] - cr[0], -1)\n                arr[slice(*ir), slice(*cr)] = tab\n\n        return output\n\n    def is_nodal(self):\n        \"\"\"True if primal and dual bases are orthogonal.\"\"\"\n        return all(e.is_nodal() for e in self._elements)\n\n\ndef concatenate_entity_dofs(ref_el, elements):\n    \"\"\"Combine the entity_dofs from a list of elements into a combined\n    entity_dof containing the information for the concatenated DoFs of\n    all the elements.\"\"\"\n    entity_dofs = {dim: {i: [] for i in entities}\n                   for dim, entities in ref_el.get_topology().items()}\n    offsets = numpy.cumsum([0] + list(e.space_dimension()\n                                      for e in elements), dtype=int)\n    for i, d in enumerate(e.entity_dofs() for e in elements):\n        for dim, dofs in d.items():\n            for ent, off in dofs.items():\n                entity_dofs[dim][ent] += list(map(partial(add, offsets[i]), off))\n    return entity_dofs\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/mixed.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, 1, null, null, null, null, 1, 1, 1, 1, null, null, null, 1, 1, 1, 0, null, null, null, 1, 1, 1, null, 1, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 1], "source": "# Copyright (C) 2008 Robert C. 
Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import finite_element, polynomial_set, dual_set, functional\nfrom FIAT.reference_element import TRIANGLE\n\n\nclass MorleyDualSet(dual_set.DualSet):\n    \"\"\"The dual basis for Lagrange elements.  This class works for\n    simplices of any dimension.  Nodes are point evaluation at\n    equispaced points.\"\"\"\n\n    def __init__(self, ref_el):\n        entity_ids = {}\n        nodes = []\n        cur = 0\n\n        # make nodes by getting points\n        # need to do this dimension-by-dimension, facet-by-facet\n        top = ref_el.get_topology()\n        verts = ref_el.get_vertices()\n        if ref_el.get_shape() != TRIANGLE:\n            raise ValueError(\"Morley only defined on triangles\")\n\n        # vertex point evaluations\n\n        entity_ids[0] = {}\n        for v in sorted(top[0]):\n            nodes.append(functional.PointEvaluation(ref_el, verts[v]))\n\n            entity_ids[0][v] = [cur]\n            cur += 1\n\n        # edge dof -- normal at each edge midpoint\n        entity_ids[1] = {}\n        for e in sorted(top[1]):\n            pt = ref_el.make_points(1, e, 2)[0]\n            n = functional.PointNormalDerivative(ref_el, e, pt)\n            nodes.append(n)\n            entity_ids[1][e] = [cur]\n            cur += 1\n\n        entity_ids[2] = {0: []}\n\n        super(MorleyDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass Morley(finite_element.CiarletElement):\n    \"\"\"The Morley finite element.\"\"\"\n\n    def __init__(self, ref_el):\n        poly_set = polynomial_set.ONPolynomialSet(ref_el, 2)\n        dual = MorleyDualSet(ref_el)\n        super(Morley, self).__init__(poly_set, dual, 2)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/morley.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, null, null, 1, null, null, 1, 1, 0, null, 1, null, 1, 1, 1, null, 1, null, 1, null, 1, 1, null, 1, null, 1, 1, null, 1, null, 1, 1, null, 1, null, null, null, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, null, null, null, null, null, null, null, 1, null, null, null, null, null, null, 1, null, null, null, 1, null, 1, 1, 0, null, 1, null, 1, 1, 1, 1, null, 1, null, 1, null, 1, null, 1, null, null, 1, null, 1, null, 1, null, 1, 1, null, 1, null, 1, null, null, null, 1, 1, null, 1, 1, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, null, null, 1, null, null, 1, null, null, 1, 1, 1, 0, null, 1, null, 1, null, 1, null, null, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, null, 1, null, null, 1, 1, 1, 1, null, 1, null, null, 1, null, 1, 1, 1, null, null, 1, 1, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 0, null, 1, null, 1, null, null, 1, null, 1, null, 1, 1, 1, 1, 1, null, 1, 1, 1, 
1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, null, 1, null, 1, 1, 1, 1, null, 1, null, null, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, null, 1, 1, 1, null, 1, null, null, 1, null, null, 1, null, 1, null, 1, 1, 1, 1, 1, 1, null, 0, 1, 1, null], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n# Modified by Andrew T. T. McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import (polynomial_set, expansions, quadrature, dual_set,\n                  finite_element, functional)\nfrom itertools import chain\nimport numpy\n\n\ndef NedelecSpace2D(ref_el, k):\n    \"\"\"Constructs a basis for the 2d H(curl) space of the first kind\n    which is (P_k)^2 + P_k rot( x )\"\"\"\n    sd = ref_el.get_spatial_dimension()\n    if sd != 2:\n        raise Exception(\"NedelecSpace2D requires 2d reference element\")\n\n    vec_Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1, (sd,))\n\n    dimPkp1 = expansions.polynomial_dimension(ref_el, k + 1)\n    dimPk = expansions.polynomial_dimension(ref_el, k)\n    dimPkm1 = expansions.polynomial_dimension(ref_el, k - 1)\n\n    vec_Pk_indices = list(chain(*(range(i * dimPkp1, i * dimPkp1 + dimPk)\n                                  for i in range(sd))))\n    vec_Pk_from_Pkp1 = vec_Pkp1.take(vec_Pk_indices)\n\n    Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1)\n    PkH = Pkp1.take(list(range(dimPkm1, dimPk)))\n\n    Q = quadrature.make_quadrature(ref_el, 2 * k + 2)\n\n    Qpts = numpy.array(Q.get_points())\n    Qwts = numpy.array(Q.get_weights())\n\n    zero_index = tuple([0 for i in range(sd)])\n\n    PkH_at_Qpts = PkH.tabulate(Qpts)[zero_index]\n    Pkp1_at_Qpts = Pkp1.tabulate(Qpts)[zero_index]\n\n    PkH_crossx_coeffs = numpy.zeros((PkH.get_num_members(),\n                                     sd,\n                                     Pkp1.get_num_members()), \"d\")\n\n    def rot_x_foo(a):\n        if a == 0:\n            return 1, 1.0\n        elif a == 1:\n            return 0, -1.0\n\n    for i in range(PkH.get_num_members()):\n        for j in range(sd):\n            (ind, sign) = rot_x_foo(j)\n            for k in range(Pkp1.get_num_members()):\n                PkH_crossx_coeffs[i, j, k] = sign * sum(Qwts * PkH_at_Qpts[i, :] * Qpts[:, ind] * Pkp1_at_Qpts[k, :])\n#                for l in range( len( Qpts ) ):\n#                    PkH_crossx_coeffs[i,j,k] += Qwts[ l ] \\\n#                                                * PkH_at_Qpts[i,l] \\\n#                                                * Qpts[l][ind] \\\n#                                                * Pkp1_at_Qpts[k,l] \\\n#                                                * sign\n\n    PkHcrossx = polynomial_set.PolynomialSet(ref_el,\n                                             k + 1,\n                                             k + 1,\n                                             
vec_Pkp1.get_expansion_set(),\n                                             PkH_crossx_coeffs,\n                                             vec_Pkp1.get_dmats())\n\n    return polynomial_set.polynomial_set_union_normalized(vec_Pk_from_Pkp1,\n                                                          PkHcrossx)\n\n\ndef NedelecSpace3D(ref_el, k):\n    \"\"\"Constructs a nodal basis for the 3d first-kind Nedelec space\"\"\"\n    sd = ref_el.get_spatial_dimension()\n    if sd != 3:\n        raise Exception(\"NedelecSpace3D requires 3d reference element\")\n\n    vec_Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1, (sd,))\n\n    dimPkp1 = expansions.polynomial_dimension(ref_el, k + 1)\n    dimPk = expansions.polynomial_dimension(ref_el, k)\n    if k > 0:\n        dimPkm1 = expansions.polynomial_dimension(ref_el, k - 1)\n    else:\n        dimPkm1 = 0\n\n    vec_Pk_indices = list(chain(*(range(i * dimPkp1, i * dimPkp1 + dimPk)\n                                  for i in range(sd))))\n    vec_Pk = vec_Pkp1.take(vec_Pk_indices)\n\n    vec_Pke_indices = list(chain(*(range(i * dimPkp1 + dimPkm1, i * dimPkp1 + dimPk)\n                                   for i in range(sd))))\n\n    vec_Pke = vec_Pkp1.take(vec_Pke_indices)\n\n    Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1)\n\n    Q = quadrature.make_quadrature(ref_el, 2 * (k + 1))\n\n    Qpts = numpy.array(Q.get_points())\n    Qwts = numpy.array(Q.get_weights())\n\n    zero_index = tuple([0 for i in range(sd)])\n\n    PkCrossXcoeffs = numpy.zeros((vec_Pke.get_num_members(),\n                                  sd,\n                                  Pkp1.get_num_members()), \"d\")\n\n    Pke_qpts = vec_Pke.tabulate(Qpts)[zero_index]\n    Pkp1_at_Qpts = Pkp1.tabulate(Qpts)[zero_index]\n\n    for i in range(vec_Pke.get_num_members()):\n        for j in range(sd):  # vector components\n            qwts_cur_bf_val = (\n                Qpts[:, (j + 2) % 3] * Pke_qpts[i, (j + 1) % 3, :] -\n                Qpts[:, (j + 1) % 3] * Pke_qpts[i, (j + 2) % 3, :]) * Qwts\n            PkCrossXcoeffs[i, j, :] = numpy.dot(Pkp1_at_Qpts, qwts_cur_bf_val)\n#            for k in range( Pkp1.get_num_members() ):\n#                 PkCrossXcoeffs[i,j,k] = sum( Qwts * cur_bf_val * Pkp1_at_Qpts[k,:] )\n#                for l in range( len( Qpts ) ):\n#                    cur_bf_val = Qpts[l][(j+2)%3] \\\n#                                 * Pke_qpts[i,(j+1)%3,l] \\\n#                                 - Qpts[l][(j+1)%3] \\\n#                                 * Pke_qpts[i,(j+2)%3,l]\n#                    PkCrossXcoeffs[i,j,k] += Qwts[l] \\\n#                                             * cur_bf_val \\\n#                                             * Pkp1_at_Qpts[k,l]\n\n    PkCrossX = polynomial_set.PolynomialSet(ref_el,\n                                            k + 1,\n                                            k + 1,\n                                            vec_Pkp1.get_expansion_set(),\n                                            PkCrossXcoeffs,\n                                            vec_Pkp1.get_dmats())\n    return polynomial_set.polynomial_set_union_normalized(vec_Pk, PkCrossX)\n\n\nclass NedelecDual2D(dual_set.DualSet):\n    \"\"\"Dual basis for first-kind Nedelec in 2d \"\"\"\n\n    def __init__(self, ref_el, degree):\n        sd = ref_el.get_spatial_dimension()\n        if sd != 2:\n            raise Exception(\"Nedelec2D only works on triangles\")\n\n        nodes = []\n\n        t = ref_el.get_topology()\n\n        num_edges = len(t[1])\n\n       
 # edge tangents\n        for i in range(num_edges):\n            pts_cur = ref_el.make_points(1, i, degree + 2)\n            for j in range(len(pts_cur)):\n                pt_cur = pts_cur[j]\n                f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur)\n                nodes.append(f)\n\n        # internal moments\n        if degree > 0:\n            Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))\n            qpts = Q.get_points()\n            Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1)\n            zero_index = tuple([0 for i in range(sd)])\n            Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index]\n\n            for d in range(sd):\n                for i in range(Pkm1_at_qpts.shape[0]):\n                    phi_cur = Pkm1_at_qpts[i, :]\n                    l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,))\n                    nodes.append(l_cur)\n\n        entity_ids = {}\n\n        # set to empty\n        for i in range(sd + 1):\n            entity_ids[i] = {}\n            for j in range(len(t[i])):\n                entity_ids[i][j] = []\n\n        cur = 0\n\n        # edges\n        num_edge_pts = len(ref_el.make_points(1, 0, degree + 2))\n\n        for i in range(len(t[1])):\n            entity_ids[1][i] = list(range(cur, cur + num_edge_pts))\n            cur += num_edge_pts\n\n        # moments against P_{degree-1} internally, if degree > 0\n        if degree > 0:\n            num_internal_dof = sd * Pkm1_at_qpts.shape[0]\n            entity_ids[2][0] = list(range(cur, cur + num_internal_dof))\n\n        super(NedelecDual2D, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass NedelecDual3D(dual_set.DualSet):\n    \"\"\"Dual basis for first-kind Nedelec in 3d \"\"\"\n\n    def __init__(self, ref_el, degree):\n        sd = ref_el.get_spatial_dimension()\n        if sd != 3:\n            raise Exception(\"NedelecDual3D only works on tetrahedra\")\n\n        nodes = []\n\n        t = ref_el.get_topology()\n\n        # how many edges\n        num_edges = len(t[1])\n\n        for i in range(num_edges):\n            # points to specify P_k on each edge\n            pts_cur = ref_el.make_points(1, i, degree + 2)\n            for j in range(len(pts_cur)):\n                pt_cur = pts_cur[j]\n                f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur)\n                nodes.append(f)\n\n        if degree > 0:  # face tangents\n            num_faces = len(t[2])\n            for i in range(num_faces):  # loop over faces\n                pts_cur = ref_el.make_points(2, i, degree + 2)\n                for j in range(len(pts_cur)):  # loop over points\n                    pt_cur = pts_cur[j]\n                    for k in range(2):  # loop over tangents\n                        f = functional.PointFaceTangentEvaluation(ref_el, i, k, pt_cur)\n                        nodes.append(f)\n\n        if degree > 1:  # internal moments\n            Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))\n            qpts = Q.get_points()\n            Pkm2 = polynomial_set.ONPolynomialSet(ref_el, degree - 2)\n            zero_index = tuple([0 for i in range(sd)])\n            Pkm2_at_qpts = Pkm2.tabulate(qpts)[zero_index]\n\n            for d in range(sd):\n                for i in range(Pkm2_at_qpts.shape[0]):\n                    phi_cur = Pkm2_at_qpts[i, :]\n                    f = functional.IntegralMoment(ref_el, Q, phi_cur, (d,))\n                    nodes.append(f)\n\n        entity_ids = {}\n        # set to empty\n        for i 
in range(sd + 1):\n            entity_ids[i] = {}\n            for j in range(len(t[i])):\n                entity_ids[i][j] = []\n\n        cur = 0\n\n        # edge dof\n        num_pts_per_edge = len(ref_el.make_points(1, 0, degree + 2))\n        for i in range(len(t[1])):\n            entity_ids[1][i] = list(range(cur, cur + num_pts_per_edge))\n            cur += num_pts_per_edge\n\n        # face dof\n        if degree > 0:\n            num_pts_per_face = len(ref_el.make_points(2, 0, degree + 2))\n            for i in range(len(t[2])):\n                entity_ids[2][i] = list(range(cur, cur + 2 * num_pts_per_face))\n                cur += 2 * num_pts_per_face\n\n        if degree > 1:\n            num_internal_dof = Pkm2_at_qpts.shape[0] * sd\n            entity_ids[3][0] = list(range(cur, cur + num_internal_dof))\n\n        super(NedelecDual3D, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass Nedelec(finite_element.CiarletElement):\n    \"\"\"Nedelec finite element\"\"\"\n\n    def __init__(self, ref_el, q):\n\n        degree = q - 1\n\n        if ref_el.get_spatial_dimension() == 3:\n            poly_set = NedelecSpace3D(ref_el, degree)\n            dual = NedelecDual3D(ref_el, degree)\n        elif ref_el.get_spatial_dimension() == 2:\n            poly_set = NedelecSpace2D(ref_el, degree)\n            dual = NedelecDual2D(ref_el, degree)\n        else:\n            raise Exception(\"Not implemented\")\n        formdegree = 1  # 1-form\n        super(Nedelec, self).__init__(poly_set, dual, degree, formdegree,\n                                      mapping=\"covariant piola\")\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/nedelec.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, null, null, 1, null, 1, null, null, 1, 1, null, null, 1, 1, null, null, 1, null, null, 1, 1, 1, null, null, 1, 1, null, 1, 1, null, null, 1, 1, 1, null, 1, null, 1, null, null, null, null, null, 1, 1, 1, null, null, 1, null, null, 1, null, null, 1, 1, null, 1, null, 1, null, null, null, 1, 1, null, null, 1, 1, 1, null, 1, 1, null, null, 1, 1, null, null, 1, 1, null, null, null, 1, 1, 1, null, null, null, 1, 1, 1, 1, null, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, 1, null, null, null, null, null, 1, null, null, 1, null, 1, null, 1, null, null, null, null, 1, 1, 1, null, null, 1, 1, null, null, 1, 1, null, null, 1, null, null, 1, null, null, null, 1, 1, null, null, 1, null, null, null, null, null, null, null, 1, null, null, 1, null, null, 1, null, null, 1, null, null, 1, null, null, 1, null, null, 1, null, null, 1], "source": "# Copyright (C) 2010-2012 Marie E. Rognes\n# Modified by Andrew T. T. McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
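The nedelec.py source captured in the coverage payload above defines the first-kind Nedelec space and element. As an illustrative aside (not output from this build), a minimal sketch of constructing the lowest-order element on the UFC reference tetrahedron and tabulating its nodal basis; the evaluation point and the expected counts in the comments are assumptions based on that source:

from FIAT.reference_element import UFCTetrahedron
from FIAT.nedelec import Nedelec

cell = UFCTetrahedron()
N1 = Nedelec(cell, 1)            # q = 1 gives the lowest-order (degree-0) edge element
print(N1.space_dimension())      # expected: 6, one tangential DOF per edge of the tetrahedron
# Tabulate the vector-valued nodal basis at one interior point (illustrative point).
vals = N1.get_nodal_basis().tabulate([(0.25, 0.25, 0.25)])[(0, 0, 0)]
print(vals.shape)                # expected: (6, 3, 1) = members x components x points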
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nimport numpy\n\nfrom FIAT.finite_element import CiarletElement\nfrom FIAT.dual_set import DualSet\nfrom FIAT.polynomial_set import ONPolynomialSet\nfrom FIAT.functional import PointEdgeTangentEvaluation as Tangent\nfrom FIAT.functional import FrobeniusIntegralMoment as IntegralMoment\nfrom FIAT.raviart_thomas import RaviartThomas\nfrom FIAT.quadrature import make_quadrature, UFCTetrahedronFaceQuadratureRule\nfrom FIAT.reference_element import UFCTetrahedron\n\n\nclass NedelecSecondKindDual(DualSet):\n    \"\"\"\n    This class represents the dual basis for the Nedelec H(curl)\n    elements of the second kind. The degrees of freedom (L) for the\n    elements of the k'th degree are\n\n    d = 2:\n\n      vertices: None\n\n      edges:    L(f) = f (x_i) * t       for (k+1) points x_i on each edge\n\n      cell:     L(f) = \\int f * g * dx   for g in RT_{k-1}\n\n\n   d = 3:\n\n      vertices: None\n\n      edges:    L(f)  = f(x_i) * t         for (k+1) points x_i on each edge\n\n      faces:    L(f) = \\int_F f * g * ds   for g in RT_{k-1}(F) for each face F\n\n      cell:     L(f) = \\int f * g * dx     for g in RT_{k-2}\n\n    Higher spatial dimensions are not yet implemented. (For d = 1,\n    these elements coincide with the CG_k elements.)\n    \"\"\"\n\n    def __init__(self, cell, degree):\n\n        # Define degrees of freedom\n        (dofs, ids) = self.generate_degrees_of_freedom(cell, degree)\n\n        # Call init of super-class\n        super(NedelecSecondKindDual, self).__init__(dofs, cell, ids)\n\n    def generate_degrees_of_freedom(self, cell, degree):\n        \"Generate dofs and geometry-to-dof maps (ids).\"\n\n        dofs = []\n        ids = {}\n\n        # Extract spatial dimension and topology\n        d = cell.get_spatial_dimension()\n        assert (d in (2, 3)), \"Second kind Nedelecs only implemented in 2/3D.\"\n\n        # Zero vertex-based degrees of freedom (d+1 of these)\n        ids[0] = dict(list(zip(list(range(d + 1)), ([] for i in range(d + 1)))))\n\n        # (d+1) degrees of freedom per entity of codimension 1 (edges)\n        (edge_dofs, edge_ids) = self._generate_edge_dofs(cell, degree, 0)\n        dofs.extend(edge_dofs)\n        ids[1] = edge_ids\n\n        # Include face degrees of freedom if 3D\n        if d == 3:\n            (face_dofs, face_ids) = self._generate_face_dofs(cell, degree,\n                                                             len(dofs))\n            dofs.extend(face_dofs)\n            ids[2] = face_ids\n\n        # Varying degrees of freedom (possibly zero) per cell\n        (cell_dofs, cell_ids) = self._generate_cell_dofs(cell, degree, len(dofs))\n        dofs.extend(cell_dofs)\n        ids[d] = cell_ids\n\n        return (dofs, ids)\n\n    def _generate_edge_dofs(self, cell, degree, offset):\n        \"\"\"Generate degrees of freedoms (dofs) for entities of\n        codimension 1 (edges).\"\"\"\n\n        # (degree+1) tangential component point evaluation degrees of\n        # freedom per entity of codimension 1 (edges)\n        dofs = []\n        ids = {}\n        for edge in range(len(cell.get_topology()[1])):\n\n            # Create points for evaluation of tangential components\n            points = cell.make_points(1, edge, degree + 2)\n\n            # A tangential component evaluation for each point\n            dofs 
+= [Tangent(cell, edge, point) for point in points]\n\n            # Associate these dofs with this edge\n            i = len(points) * edge\n            ids[edge] = list(range(offset + i, offset + i + len(points)))\n\n        return (dofs, ids)\n\n    def _generate_face_dofs(self, cell, degree, offset):\n        \"\"\"Generate degrees of freedoms (dofs) for faces.\"\"\"\n\n        # Initialize empty dofs and identifiers (ids)\n        dofs = []\n        ids = dict(list(zip(list(range(4)), ([] for i in range(4)))))\n\n        # Return empty info if not applicable\n        d = cell.get_spatial_dimension()\n        if (degree < 2):\n            return (dofs, ids)\n\n        msg = \"2nd kind Nedelec face dofs only available with UFC convention\"\n        assert isinstance(cell, UFCTetrahedron), msg\n\n        # Iterate over the faces of the tet\n        num_faces = len(cell.get_topology()[2])\n        for face in range(num_faces):\n\n            # Construct quadrature scheme for this face\n            m = 2 * (degree + 1)\n            Q_face = UFCTetrahedronFaceQuadratureRule(face, m)\n\n            # Construct Raviart-Thomas of (degree - 1) on the\n            # reference face\n            reference_face = Q_face.reference_rule().ref_el\n            RT = RaviartThomas(reference_face, degree - 1)\n            num_rts = RT.space_dimension()\n\n            # Evaluate RT basis functions at reference quadrature\n            # points\n            ref_quad_points = Q_face.reference_rule().get_points()\n            num_quad_points = len(ref_quad_points)\n            Phi = RT.get_nodal_basis()\n            Phis = Phi.tabulate(ref_quad_points)[(0, 0)]\n\n            # Note: Phis has dimensions:\n            # num_basis_functions x num_components x num_quad_points\n\n            # Map Phis -> phis (reference values to physical values)\n            J = Q_face.jacobian()\n            scale = 1.0 / numpy.sqrt(numpy.linalg.det(J.transpose() * J))\n            phis = numpy.ndarray((d, num_quad_points))\n            for i in range(num_rts):\n                for q in range(num_quad_points):\n                    phi_i_q = scale * J * numpy.matrix(Phis[i, :, q]).transpose()\n                    for j in range(d):\n                        phis[j, q] = phi_i_q[j]\n\n                # Construct degrees of freedom as integral moments on\n                # this cell, using the special face quadrature\n                # weighted against the values of the (physical)\n                # Raviart--Thomas'es on the face\n                dofs += [IntegralMoment(cell, Q_face, phis)]\n\n            # Assign identifiers (num RTs per face + previous edge dofs)\n            ids[face] = list(range(offset + num_rts*face, offset + num_rts*(face + 1)))\n\n        return (dofs, ids)\n\n    def _generate_cell_dofs(self, cell, degree, offset):\n        \"\"\"Generate degrees of freedoms (dofs) for entities of\n        codimension d (cells).\"\"\"\n\n        # Return empty info if not applicable\n        d = cell.get_spatial_dimension()\n        if (d == 2 and degree < 2) or (d == 3 and degree < 3):\n            return ([], {0: []})\n\n        # Create quadrature points\n        Q = make_quadrature(cell, 2 * (degree + 1))\n        qs = Q.get_points()\n\n        # Create Raviart-Thomas nodal basis\n        RT = RaviartThomas(cell, degree + 1 - d)\n        phi = RT.get_nodal_basis()\n\n        # Evaluate Raviart-Thomas basis at quadrature points\n        phi_at_qs = phi.tabulate(qs)[(0,) * d]\n\n        # Use (Frobenius) integral moments 
against RTs as dofs\n        dofs = [IntegralMoment(cell, Q, phi_at_qs[i, :])\n                for i in range(len(phi_at_qs))]\n\n        # Associate these dofs with the interior\n        ids = {0: list(range(offset, offset + len(dofs)))}\n        return (dofs, ids)\n\n\nclass NedelecSecondKind(CiarletElement):\n    \"\"\"\n    The H(curl) Nedelec elements of the second kind on triangles and\n    tetrahedra: the polynomial space described by the full polynomials\n    of degree k, with a suitable set of degrees of freedom to ensure\n    H(curl) conformity.\n    \"\"\"\n\n    def __init__(self, cell, degree):\n\n        # Check degree\n        assert degree >= 1, \"Second kind Nedelecs start at 1!\"\n\n        # Get dimension\n        d = cell.get_spatial_dimension()\n\n        # Construct polynomial basis for d-vector fields\n        Ps = ONPolynomialSet(cell, degree, (d, ))\n\n        # Construct dual space\n        Ls = NedelecSecondKindDual(cell, degree)\n\n        # Set form degree\n        formdegree = 1  # 1-form\n\n        # Set mapping\n        mapping = \"covariant piola\"\n\n        # Call init of super-class\n        super(NedelecSecondKind, self).__init__(Ps, Ls, degree, formdegree, mapping=mapping)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/nedelec_second_kind.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, 1, null, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, 0, null, null, null, 1, 1, 1, 1, null, 1, 1, 1, null, 1, null, null, 1, null, 1, null, 1, null, 1, null, null, 1, 1, 1, null, null, null, null, null, null, null, 1, 1, null, null, null, 1, 1, null, null, null, 1, null, null, null, 1, null, 1, null, null, 1, 1, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "source": "# Copyright (C) 2013 Andrew T. T. McRae, 2015-2016 Jan Blechta\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nimport numpy as np\n\nfrom FIAT.polynomial_set import PolynomialSet\nfrom FIAT.dual_set import DualSet\nfrom FIAT.finite_element import CiarletElement\n\n__all__ = ['NodalEnrichedElement']\n\n\nclass NodalEnrichedElement(CiarletElement):\n    \"\"\"NodalEnriched element is a direct sum of a sequence of\n    finite elements. 
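The nedelec_second_kind.py source above spells out the degrees of freedom (edge tangent evaluations plus face and cell moments against Raviart-Thomas). A small sketch of the 2D, degree-1 case, where only the edge DOFs survive; the printed numbers are what that DOF listing implies, stated here as expectations rather than logged results:

from FIAT.reference_element import UFCTriangle
from FIAT.nedelec_second_kind import NedelecSecondKind

tri = UFCTriangle()
N2 = NedelecSecondKind(tri, 1)   # full vector-valued P1, so 6 basis members on a triangle
print(N2.space_dimension())      # expected: 6
print(N2.entity_dofs())          # expected: two tangential DOFs per edge, none on vertices or the cell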
Dual basis is reorthogonalized to the\n    primal basis for nodality.\n\n    The following is equivalent:\n        * the constructor is well-defined,\n        * the resulting element is unisolvent and its basis is nodal,\n        * the supplied elements are unisolvent with nodal basis and\n          their primal bases are mutually linearly independent,\n        * the supplied elements are unisolvent with nodal basis and\n          their dual bases are mutually linearly independent.\n    \"\"\"\n\n    def __init__(self, *elements):\n\n        # Test elements are nodal\n        if not all(e.is_nodal() for e in elements):\n            raise ValueError(\"Not all elements given for construction \"\n                             \"of NodalEnrichedElement are nodal\")\n\n        # Extract common data\n        ref_el = elements[0].get_reference_element()\n        expansion_set = elements[0].get_nodal_basis().get_expansion_set()\n        degree = min(e.get_nodal_basis().get_degree() for e in elements)\n        embedded_degree = max(e.get_nodal_basis().get_embedded_degree()\n                              for e in elements)\n        order = max(e.get_order() for e in elements)\n        mapping = elements[0].mapping()[0]\n        formdegree = None if any(e.get_formdegree() is None for e in elements) \\\n            else max(e.get_formdegree() for e in elements)\n        value_shape = elements[0].value_shape()\n\n        # Sanity check\n        assert all(e.get_nodal_basis().get_reference_element() ==\n                   ref_el for e in elements)\n        assert all(type(e.get_nodal_basis().get_expansion_set()) ==\n                   type(expansion_set) for e in elements)\n        assert all(e_mapping == mapping for e in elements\n                   for e_mapping in e.mapping())\n        assert all(e.value_shape() == value_shape for e in elements)\n\n        # Merge polynomial sets\n        coeffs = _merge_coeffs([e.get_coeffs() for e in elements])\n        dmats = _merge_dmats([e.dmats() for e in elements])\n        poly_set = PolynomialSet(ref_el,\n                                 degree,\n                                 embedded_degree,\n                                 expansion_set,\n                                 coeffs,\n                                 dmats)\n\n        # Renumber dof numbers\n        offsets = np.cumsum([0] + [e.space_dimension() for e in elements[:-1]])\n        entity_ids = _merge_entity_ids((e.entity_dofs() for e in elements),\n                                       offsets)\n\n        # Merge dual bases\n        nodes = [node for e in elements for node in e.dual_basis()]\n        dual_set = DualSet(nodes, ref_el, entity_ids)\n\n        # CiarletElement constructor adjusts poly_set coefficients s.t.\n        # dual_set is really dual to poly_set\n        super(NodalEnrichedElement, self).__init__(poly_set, dual_set, order,\n                                                   formdegree=formdegree, mapping=mapping)\n\n\ndef _merge_coeffs(coeffss):\n    # Number of bases members\n    total_dim = sum(c.shape[0] for c in coeffss)\n\n    # Value shape\n    value_shape = coeffss[0].shape[1:-1]\n    assert all(c.shape[1:-1] == value_shape for c in coeffss)\n\n    # Number of expansion polynomials\n    max_expansion_dim = max(c.shape[-1] for c in coeffss)\n\n    # Compose new coeffs\n    shape = (total_dim,) + value_shape + (max_expansion_dim,)\n    new_coeffs = np.zeros(shape, dtype=coeffss[0].dtype)\n    counter = 0\n    for c in coeffss:\n        dim = c.shape[0]\n        
expansion_dim = c.shape[-1]\n        new_coeffs[counter:counter+dim, ..., :expansion_dim] = c\n        counter += dim\n    assert counter == total_dim\n    return new_coeffs\n\n\ndef _merge_dmats(dmatss):\n    shape, arg = max((dmats[0].shape, args) for args, dmats in enumerate(dmatss))\n    assert len(shape) == 2 and shape[0] == shape[1]\n    new_dmats = []\n    for dim in range(len(dmatss[arg])):\n        new_dmats.append(dmatss[arg][dim].copy())\n        for dmats in dmatss:\n            sl = slice(0, dmats[dim].shape[0]), slice(0, dmats[dim].shape[1])\n            assert np.allclose(dmats[dim], new_dmats[dim][sl]), \\\n                \"dmats of elements to be directly summed are not matching!\"\n    return new_dmats\n\n\ndef _merge_entity_ids(entity_ids, offsets):\n    ret = {}\n    for i, ids in enumerate(entity_ids):\n        for dim in ids:\n            if not ret.get(dim):\n                ret[dim] = {}\n            for entity in ids[dim]:\n                if not ret[dim].get(entity):\n                    ret[dim][entity] = []\n                ret[dim][entity] += (np.array(ids[dim][entity]) + offsets[i]).tolist()\n    return ret\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/nodal_enriched.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, null, 1, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, 1, 0, 0, null, 1, 1, 1, 1, 1, 1, 1, null, 1, null, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 0, 0, null, null, 0, 0, null, 0, 0, null, 0, null, 0, 0, 0, null, 0, null, null, 1, null, null, null, null, 0, 0, 0, null, 0, 0, null, 0, 0, null, 0, null, null, 1, null, null, null, null, null, null, null, 0, 0, null, 0, 0, 0, 0, null, 0, null, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, null, null, 0, 0, 0, null, 0, 0, null, null, 1, null, null, null, null, null, null, null, null, 0, 0, 0, null, null, 0, null, 0, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 0, 0, null, 0, null, 0, null, 0, 0, 0, null, 0, 0, 0, null, null, 0, 0, null, 0, 0, 0, 0, null, 0, 0, null, 0, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 0, 0, null, 0, 0, null, 0, null, 0, 0, 0, 0, null, 0, 0, null, 0, null, 0, 0, 0, null, null, 0, null, 0, null, 0, null, 0, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 0, 0, 0, 0], "source": "\"\"\"\n    orthopoly.py - A suite of functions for generating orthogonal polynomials\n    and quadrature rules.\n\n    Copyright (c) 2014 Greg von Winckel\n    All rights reserved.\n\n    Permission is hereby granted, free of charge, to any person obtaining\n    a copy of this software and associated documentation files (the\n    \"Software\"), to deal in the Software without restriction, including\n    
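The nodal_enriched.py source above builds a nodal element as a direct sum of nodal elements whose bases are mutually linearly independent. A sketch of a typical use, enriching linear Lagrange with an interior bubble on a triangle; note that the Lagrange and Bubble elements are assumed to be available in this FIAT install (they are not among the sources dumped in this log):

from FIAT.reference_element import UFCTriangle
from FIAT.lagrange import Lagrange        # assumed available, not shown in this log
from FIAT.bubble import Bubble            # assumed available, not shown in this log
from FIAT.nodal_enriched import NodalEnrichedElement

tri = UFCTriangle()
# P1 (3 vertex DOFs) plus a cubic interior bubble (1 cell DOF): 4 DOFs in total.
mini = NodalEnrichedElement(Lagrange(tri, 1), Bubble(tri, 3))
print(mini.space_dimension())             # expected: 4
print(mini.entity_dofs())                 # vertex DOFs from P1, one interior DOF from the bubble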
without limitation the rights to use, copy, modify, merge, publish,\n    distribute, sublicense, and/or sell copies of the Software, and to\n    permit persons to whom the Software is furnished to do so, subject to\n    the following conditions:\n\n    The above copyright notice and this permission notice shall be\n    included in all copies or substantial portions of the Software.\n\n    THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n    CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n    TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n    SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n    Last updated on Wed Jan  1 14:29:25 MST 2014\n\n    Modified by David A. Ham (david.ham@imperial.ac.uk), 2016\n\"\"\"\n\nimport numpy as np\nfrom functools import reduce\nfrom math import gamma\n\n\ndef gauss(alpha, beta):\n    \"\"\"\n    Compute the Gauss nodes and weights from the recursion\n    coefficients associated with a set of orthogonal polynomials\n\n    Inputs:\n    alpha - recursion coefficients\n    beta - recursion coefficients\n\n    Outputs:\n    x - quadrature nodes\n    w - quadrature weights\n\n    Adapted from the MATLAB code by Walter Gautschi\n    http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m\n    \"\"\"\n\n    from numpy.linalg import eigh\n\n    A = np.diag(np.sqrt(beta)[1:], 1) + np.diag(alpha)\n    x, V = eigh(A, \"U\")\n\n    w = beta[0] * np.real(np.power(V[0, :], 2))\n    return x, w\n\n\ndef lobatto(alpha, beta, xl1, xl2):\n    \"\"\"\n        Compute the Lobatto nodes and weights with the preassigned\n        nodea xl1,xl2\n\n        Inputs:\n        alpha - recursion coefficients\n        beta - recursion coefficients\n        xl1 - assigned node location\n        xl2 - assigned node location\n\n        Outputs:\n        x - quadrature nodes\n        w - quadrature weights\n\n        Based on the section 7 of the paper\n        \"Some modified matrix eigenvalue problems\"\n        by Gene Golub, SIAM Review Vol 15, No. 
2, April 1973, pp.318--334\n    \"\"\"\n    from numpy.linalg import solve\n    n = len(alpha) - 1\n    en = np.zeros(n)\n    en[-1] = 1\n    A1 = np.vstack((np.sqrt(beta), alpha - xl1))\n    J1 = np.diag(A1[0, 1:-1], 1) + np.diag(A1[1, 1:]) + np.diag(A1[0, 1:-1], -1)\n    A2 = np.vstack((np.sqrt(beta), alpha - xl2))\n    J2 = np.diag(A2[0, 1:-1], 1) + np.diag(A2[1, 1:]) + np.diag(A2[0, 1:-1], -1)\n    g1 = solve(J1, en)\n    g2 = solve(J2, en)\n    C = np.array(((1, -g1[-1]), (1, -g2[-1])))\n    xl = np.array((xl1, xl2))\n    ab = solve(C, xl)\n\n    alphal = alpha\n    alphal[-1] = ab[0]\n    betal = beta\n    betal[-1] = ab[1]\n    x, w = gauss(alphal, betal)\n    return x, w\n\n\ndef rec_jacobi(N, a, b):\n    \"\"\"\n    Generate the recursion coefficients alpha_k, beta_k\n\n    P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)\n\n    for the Jacobi polynomials which are orthogonal on [-1,1]\n    with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b]\n\n    Inputs:\n    N - polynomial order\n    a - weight parameter\n    b - weight parameter\n\n    Outputs:\n    alpha - recursion coefficients\n    beta - recursion coefficients\n\n    Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi\n    http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m\n    \"\"\"\n\n    nu = (b - a) / float(a + b + 2)\n    mu = 2 ** (a + b + 1) * gamma(a + 1) * gamma(b + 1) / gamma(a + b + 2)\n\n    if N == 1:\n        alpha = nu\n        beta = mu\n    else:\n        n = np.arange(1.0, N)\n        nab = 2 * n + a + b\n        alpha = np.hstack((nu, (b ** 2 - a ** 2) / (nab * (nab + 2))))\n        n = n[1:]\n        nab = nab[1:]\n        B1 = 4 * (a + 1) * (b + 1) / float((a + b + 2) ** 2 * (a + b + 3))\n        B = 4 * (n + a) * (n + b) * n * (n + a + b) / \\\n            (nab ** 2 * (nab + 1) * (nab - 1))\n        beta = np.hstack((mu, B1, B))\n\n    return alpha, beta\n\n\ndef rec_jacobi01(N, a, b):\n    \"\"\"\n    Generate the recursion coefficients alpha_k, beta_k\n    for the Jacobi polynomials which are orthogonal on [0,1]\n\n    See rec_jacobi for the recursion coefficients on [-1,1]\n\n    Inputs:\n    N - polynomial order\n    a - weight parameter\n    b - weight parameter\n\n    Outputs:\n    alpha - recursion coefficients\n    beta - recursion coefficients\n\n    Adapted from the MATLAB implementation:\n    https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m\n\n    \"\"\"\n\n    if a <= -1 or b <= -1:\n        raise ValueError('''Jacobi coefficients are defined only\n                            for alpha,beta > -1''')\n\n    if not isinstance(N, int):\n        raise TypeError('N must be an integer')\n\n    if N < 1:\n        raise ValueError('N must be at least 1')\n\n    c, d = rec_jacobi(N, a, b)\n\n    alpha = (1 + c) / 2\n    beta = d / 4\n    beta[0] = d[0] / 2 ** (a + b + 1)\n\n    return alpha, beta\n\n\ndef polyval(alpha, beta, x):\n    \"\"\"\n    Evaluate polynomials on x given the recursion coefficients alpha and beta\n    \"\"\"\n\n    N = len(alpha)\n    m = len(x)\n    P = np.zeros((m, N + 1))\n\n    P[:, 0] = 1\n    P[:, 1] = (x - alpha[0]) * P[:, 0]\n\n    for k in range(1, N):\n        P[:, k + 1] = (x - alpha[k]) * P[:, k] - beta[k] * P[:, k - 1]\n\n    return P\n\n\ndef jacobi(N, a, b, x, NOPT=1):\n    \"\"\"\n    JACOBI computes the Jacobi polynomials which are orthogonal on [-1,1]\n    with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them\n    on the given grid up to P_N(x). 
Setting NOPT=2 returns the\n    L2-normalized polynomials\n    \"\"\"\n\n    m = len(x)\n    P = np.zeros((m, N + 1))\n\n    apb = a + b\n    a1 = a - 1\n    b1 = b - 1\n    c = apb * (a - b)\n\n    P[:, 0] = 1\n\n    if N > 0:\n        P[:, 1] = 0.5 * (a - b + (apb + 2) * x)\n\n    if N > 1:\n        for k in range(2, N + 1):\n            k2 = 2 * k\n            g = k2 + apb\n            g1 = g - 1\n            g2 = g - 2\n            d = 2.0 * (k + a1) * (k + b1) * g\n            P[:, k] = (g1 * (c + g2 * g * x) * P[:, k - 1] -\n                       d * P[:, k - 2]) / (k2 * (k + apb) * g2)\n\n    if NOPT == 2:\n        k = np.arange(N + 1)\n        pnorm = 2 ** (apb + 1) * gamma(k + a + 1) * gamma(k + b + 1) / \\\n            ((2 * k + a + b + 1) * (gamma(k + 1) * gamma(k + a + b + 1)))\n        P *= 1 / np.sqrt(pnorm)\n    return P\n\n\ndef jacobiD(N, a, b, x, NOPT=1):\n    \"\"\"\n    JACOBID computes the first derivatives of the normalized Jacobi\n    polynomials which are orthogonal on [-1,1] with respect\n    to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them\n    on the given grid up to P_N(x). Setting NOPT=2 returns\n    the derivatives of the L2-normalized polynomials\n    \"\"\"\n\n    z = np.zeros((len(x), 1))\n    if N == 0:\n        Px = z\n    else:\n\n        Px = 0.5 * np.hstack((z, jacobi(N - 1, a + 1, b + 1, x, NOPT) *\n                              ((a + b + 2 + np.arange(N)))))\n    return Px\n\n\ndef mm_log(N, a):\n    \"\"\"\n    MM_LOG Modified moments for a logarithmic weight function.\n\n    The call mm=MM_LOG(n,a) computes the first n modified moments of the\n    logarithmic weight function w(t)=t^a log(1/t) on [0,1] relative to\n    shifted Legendre polynomials.\n\n    REFERENCE:  Walter Gautschi,``On the preceding paper `A Legendre\n                polynomial integral' by James L. Blue'',\n                Math. Comp. 33 (1979), 742-743.\n\n    Adapted from the MATLAB implementation:\n    https://www.cs.purdue.edu/archives/2002/wxg/codes/mm_log.m\n    \"\"\"\n\n    if a <= -1:\n        raise ValueError('Parameter a must be greater than -1')\n\n    prod = lambda z: reduce(lambda x, y: x * y, z, 1)\n\n    mm = np.zeros(N)\n\n    c = 1\n    for n in range(N):\n        if isinstance(a, int) and a < n:\n\n            p = range(n - a, n + a + 2)\n            mm[n] = (-1) ** (n - a) / prod(p)\n            mm[n] *= gamma(a + 1) ** 2\n\n        else:\n            if n == 0:\n                mm[0] = 1 / (a + 1) ** 2\n            else:\n                k = np.arange(1, n + 1)\n                s = 1 / (a + 1 + k) - 1 / (a + 1 - k)\n                p = (a + 1 - k) / (a + 1 + k)\n                mm[n] = (1 / (a + 1) + sum(s)) * prod(p) / (a + 1)\n\n        mm[n] *= c\n        c *= 0.5 * (n + 1) / (2 * n + 1)\n\n    return mm\n\n\ndef mod_chebyshev(N, mom, alpham, betam):\n    \"\"\"\n    Calcuate the recursion coefficients for the orthogonal polynomials\n    which are are orthogonal with respect to a weight function which is\n    represented in terms of its modifed moments which are obtained by\n    integrating the monic polynomials against the weight function.\n\n    REFERENCES:\n\n    John C. Wheeler, \"Modified moments and Gaussian quadratures\"\n    Rocky Mountain Journal of Mathematics, Vol. 4, Num. 2 (1974), 287--296\n\n    Walter Gautschi, \"Orthogonal Polynomials (in Matlab)\n    Journal of Computational and Applied Mathematics, Vol. 
178 (2005) 215--234\n\n    Adapted from the MATLAB implementation:\n    https://www.cs.purdue.edu/archives/2002/wxg/codes/chebyshev.m\n    \"\"\"\n\n    if not isinstance(N, int):\n        raise TypeError('N must be an integer')\n\n    if N < 1:\n        raise ValueError('N must be at least 1')\n\n    N = min(N, int(len(mom) / 2))\n\n    alpha = np.zeros(N)\n    beta = np.zeros(N)\n    normsq = np.zeros(N)\n    sig = np.zeros((N + 1, 2 * N))\n\n    alpha[0] = alpham[0] + mom[1] / mom[0]\n    beta[0] = mom[0]\n\n    sig[1, :] = mom\n\n    for n in range(2, N + 1):\n        for m in range(n - 1, 2 * N - n + 1):\n            sig[n, m] = sig[n - 1, m + 1] - (alpha[n - 2] - alpham[m]) * sig[n - 1, m] - \\\n                beta[n - 2] * sig[n - 2, m] + betam[m] * sig[n - 1, m - 1]\n\n        alpha[n - 1] = alpham[n - 1] + sig[n, n] / sig[n, n - 1] - sig[n - 1, n - 1] / \\\n            sig[n - 1, n - 2]\n        beta[n - 1] = sig[n, n - 1] / sig[n - 1, n - 2]\n\n    normsq = np.diagonal(sig, -1)\n\n    return alpha, beta, normsq\n\n\ndef rec_jaclog(N, a):\n    \"\"\"\n    Generate the recursion coefficients alpha_k, beta_k\n\n    P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)\n\n    for the monic polynomials which are orthogonal on [0,1]\n    with respect to the weight w(x)=x^a*log(1/x)\n\n    Inputs:\n    N - polynomial order\n    a - weight parameter\n\n    Outputs:\n    alpha - recursion coefficients\n    beta - recursion coefficients\n\n    Adated from the MATLAB code:\n    https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jaclog.m\n    \"\"\"\n    alphaj, betaj = rec_jacobi01(2 * N, 0, 0)\n    mom = mm_log(2 * N, a)\n    alpha, beta, _ = mod_chebyshev(N, mom, alphaj, betaj)\n    return alpha, beta\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/orthopoly.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, null, null, 1, null, 1, 1, 1, 1, null, 1, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, 1, 1, 1, 1, 1, null, 1, 0, null, null, 1, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, null, null, 1, null, 1, null, 1, 1, null, null, null, 1, null, null, null, null, null, null, 1, null, 1, 1, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, null, null, 1, null, 1, 1, null, 1, 1, 1, 1, 1, 1, null, null, 1, 1, null, 1, null, 1, 1, null, 1, 1, null, null, 1, null, null, 1, null, null, null, 1, null, null, null, null, null, 0, 0, 0, 0, 0, 0, null, null, 1, null, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, null, null, null, null, 1, 1, 1, 0, 0, 0, null, 1, 1, 1, 1, 1, 1, null, 1, null, 1, null, null, null, null, null, null, null, 1, null, null, null, null, null, 1, null, 1, 1, 1, null, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, null, 1, 1, null], "source": "# Copyright (C) 2008-2012 Robert C. 
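The orthopoly.py source above generates recursion coefficients for Jacobi-type polynomials and turns them into Gauss or Lobatto rules. A short sketch using only the functions shown there: a 3-point Gauss-Legendre rule (a = b = 0) is exact up to degree 5, so it should integrate x^4 over [-1, 1] to 2/5:

import numpy as np
from FIAT import orthopoly

alpha, beta = orthopoly.rec_jacobi(3, 0, 0)   # Legendre recursion coefficients
x, w = orthopoly.gauss(alpha, beta)           # 3 nodes and weights on [-1, 1]
print(np.dot(w, x ** 4))                      # expected: 0.4 (= 2/5), up to round-off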
Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\n# polynomial sets\n# basic interface:\n# -- defined over some reference element\n# -- need to be able to tabulate (jets)\n# -- type of entry: could by scalar, numpy array, or object-value\n#    (such as symmetric tensors, as long as they can be converted <-->\n#    with 1d arrays)\n# Don't need the \"Polynomial\" class we had before, provided that\n# we have an interface for defining sets of functionals (moments against\n# an entire set of polynomials)\n\nimport numpy\nfrom FIAT import expansions\nfrom FIAT.functional import index_iterator\n\n\ndef mis(m, n):\n    \"\"\"returns all m-tuples of nonnegative integers that sum up to n.\"\"\"\n    if m == 1:\n        return [(n,)]\n    elif n == 0:\n        return [tuple([0] * m)]\n    else:\n        return [tuple([n - i] + list(foo))\n                for i in range(n + 1)\n                for foo in mis(m - 1, i)]\n\n\n# We order coeffs by C_{i,j,k}\n# where i is the index into the polynomial set,\n# j may be an empty tuple (scalar polynomials)\n#   or else a vector/tensor\n# k is the expansion function\n# so if I have all bfs at a given point x in an array bf,\n# then dot(coeffs, bf) gives the array of bfs\nclass PolynomialSet(object):\n    \"\"\"Implements a set of polynomials as linear combinations of an\n    expansion set over a reference element.\n    ref_el: the reference element\n    degree: an order labeling the space\n    embedded degree: the degree of polynomial expansion basis that\n         must be used to evaluate this space\n    coeffs: A numpy array containing the coefficients of the expansion\n         basis for each member of the set.  
Coeffs is ordered by\n         coeffs[i,j,k] where i is the label of the member, k is\n         the label of the expansion function, and j is a (possibly\n         empty) tuple giving the index for a vector- or tensor-valued\n         function.\n    \"\"\"\n\n    def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs,\n                 dmats):\n        self.ref_el = ref_el\n        self.num_members = coeffs.shape[0]\n        self.degree = degree\n        self.embedded_degree = embedded_degree\n        self.expansion_set = expansion_set\n        self.coeffs = coeffs\n        self.dmats = dmats\n\n    def tabulate_new(self, pts):\n        return numpy.dot(self.coeffs,\n                         self.expansion_set.tabulate(self.embedded_degree, pts))\n\n    def tabulate(self, pts, jet_order=0):\n        \"\"\"Returns the values of the polynomial set.\"\"\"\n        result = {}\n        base_vals = self.expansion_set.tabulate(self.embedded_degree, pts)\n        for i in range(jet_order + 1):\n            alphas = mis(self.ref_el.get_spatial_dimension(), i)\n            for alpha in alphas:\n                D = form_matrix_product(self.dmats, alpha)\n                result[alpha] = numpy.dot(self.coeffs,\n                                          numpy.dot(numpy.transpose(D),\n                                                    base_vals))\n        return result\n\n    def get_expansion_set(self):\n        return self.expansion_set\n\n    def get_coeffs(self):\n        return self.coeffs\n\n    def get_num_members(self):\n        return self.num_members\n\n    def get_degree(self):\n        return self.degree\n\n    def get_embedded_degree(self):\n        return self.embedded_degree\n\n    def get_dmats(self):\n        return self.dmats\n\n    def get_reference_element(self):\n        return self.ref_el\n\n    def get_shape(self):\n        \"\"\"Returns the shape of phi(x), where () corresponds to\n        scalar (2,) a vector of length 2, etc\"\"\"\n        return self.coeffs.shape[1:-1]\n\n    def take(self, items):\n        \"\"\"Extracts subset of polynomials given by items.\"\"\"\n        new_coeffs = numpy.take(self.get_coeffs(), items, 0)\n        return PolynomialSet(self.ref_el, self.degree, self.embedded_degree,\n                             self.expansion_set, new_coeffs, self.dmats)\n\n\nclass ONPolynomialSet(PolynomialSet):\n    \"\"\"Constructs an orthonormal basis out of expansion set by having an\n    identity matrix of coefficients.  
Can be used to specify ON bases\n    for vector- and tensor-valued sets as well.\n\n    \"\"\"\n\n    def __init__(self, ref_el, degree, shape=tuple()):\n\n        if shape == tuple():\n            num_components = 1\n        else:\n            flat_shape = numpy.ravel(shape)\n            num_components = numpy.prod(flat_shape)\n        num_exp_functions = expansions.polynomial_dimension(ref_el, degree)\n        num_members = num_components * num_exp_functions\n        embedded_degree = degree\n        expansion_set = expansions.get_expansion_set(ref_el)\n        sd = ref_el.get_spatial_dimension()\n\n        # set up coefficients\n        coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions])\n        coeffs = numpy.zeros(coeffs_shape, \"d\")\n\n        # use functional's index_iterator function\n        cur_bf = 0\n\n        if shape == tuple():\n            coeffs = numpy.eye(num_members)\n        else:\n            for idx in index_iterator(shape):\n                n = expansions.polynomial_dimension(ref_el, embedded_degree)\n                for exp_bf in range(n):\n                    cur_idx = tuple([cur_bf] + list(idx) + [exp_bf])\n                    coeffs[cur_idx] = 1.0\n                    cur_bf += 1\n\n        # construct dmats\n        if degree == 0:\n            dmats = [numpy.array([[0.0]], \"d\") for i in range(sd)]\n        else:\n            pts = ref_el.make_points(sd, 0, degree + sd + 1)\n\n            v = numpy.transpose(expansion_set.tabulate(degree, pts))\n            vinv = numpy.linalg.inv(v)\n\n            dv = expansion_set.tabulate_derivatives(degree, pts)\n            dtildes = [[[a[1][i] for a in dvrow] for dvrow in dv]\n                       for i in range(sd)]\n\n            dmats = [numpy.dot(vinv, numpy.transpose(dtilde))\n                     for dtilde in dtildes]\n\n        PolynomialSet.__init__(self, ref_el, degree, embedded_degree,\n                               expansion_set, coeffs, dmats)\n\n\ndef project(f, U, Q):\n    \"\"\"Computes the expansion coefficients of f in terms of the members of\n    a polynomial set U.  Numerical integration is performed by\n    quadrature rule Q.\n\n    \"\"\"\n    pts = Q.get_points()\n    wts = Q.get_weights()\n    f_at_qps = [f(x) for x in pts]\n    U_at_qps = U.tabulate(pts)\n    coeffs = numpy.array([sum(wts * f_at_qps * phi) for phi in U_at_qps])\n    return coeffs\n\n\ndef form_matrix_product(mats, alpha):\n    \"\"\"forms product over mats[i]**alpha[i]\"\"\"\n    m = mats[0].shape[0]\n    result = numpy.eye(m)\n    for i in range(len(alpha)):\n        for j in range(alpha[i]):\n            result = numpy.dot(mats[i], result)\n    return result\n\n\ndef polynomial_set_union_normalized(A, B):\n    \"\"\"Given polynomial sets A and B, constructs a new polynomial set\n    whose span is the same as that of span(A) union span(B).  
It may\n    not contain any of the same members of the set, as we construct a\n    span via SVD.\n\n    \"\"\"\n    new_coeffs = numpy.array(list(A.coeffs) + list(B.coeffs))\n    func_shape = new_coeffs.shape[1:]\n    if len(func_shape) == 1:\n        (u, sig, vt) = numpy.linalg.svd(new_coeffs)\n        num_sv = len([s for s in sig if abs(s) > 1.e-10])\n        coeffs = vt[:num_sv]\n    else:\n        new_shape0 = new_coeffs.shape[0]\n        new_shape1 = numpy.prod(func_shape)\n        newshape = (new_shape0, new_shape1)\n        nc = numpy.reshape(new_coeffs, newshape)\n        (u, sig, vt) = numpy.linalg.svd(nc, 1)\n        num_sv = len([s for s in sig if abs(s) > 1.e-10])\n\n        coeffs = numpy.reshape(vt[:num_sv], tuple([num_sv] + list(func_shape)))\n\n    return PolynomialSet(A.get_reference_element(),\n                         A.get_degree(),\n                         A.get_embedded_degree(),\n                         A.get_expansion_set(),\n                         coeffs,\n                         A.get_dmats())\n\n\nclass ONSymTensorPolynomialSet(PolynomialSet):\n    \"\"\"Constructs an orthonormal basis for symmetric-tensor-valued\n    polynomials on a reference element.\n\n    \"\"\"\n\n    def __init__(self, ref_el, degree, size=None):\n\n        sd = ref_el.get_spatial_dimension()\n        if size is None:\n            size = sd\n\n        shape = (size, size)\n        num_exp_functions = expansions.polynomial_dimension(ref_el, degree)\n        num_components = size * (size + 1) // 2\n        num_members = num_components * num_exp_functions\n        embedded_degree = degree\n        expansion_set = expansions.get_expansion_set(ref_el)\n\n        # set up coefficients for symmetric tensors\n        coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions])\n        coeffs = numpy.zeros(coeffs_shape, \"d\")\n        cur_bf = 0\n        for [i, j] in index_iterator(shape):\n            n = expansions.polynomial_dimension(ref_el, embedded_degree)\n            if i == j:\n                for exp_bf in range(n):\n                    cur_idx = tuple([cur_bf] + [i, j] + [exp_bf])\n                    coeffs[cur_idx] = 1.0\n                    cur_bf += 1\n            elif i < j:\n                for exp_bf in range(n):\n                    cur_idx = tuple([cur_bf] + [i, j] + [exp_bf])\n                    coeffs[cur_idx] = 1.0\n                    cur_idx = tuple([cur_bf] + [j, i] + [exp_bf])\n                    coeffs[cur_idx] = 1.0\n                    cur_bf += 1\n\n        # construct dmats. 
this is the same as ONPolynomialSet.\n        pts = ref_el.make_points(sd, 0, degree + sd + 1)\n        v = numpy.transpose(expansion_set.tabulate(degree, pts))\n        vinv = numpy.linalg.inv(v)\n        dv = expansion_set.tabulate_derivatives(degree, pts)\n        dtildes = [[[a[1][i] for a in dvrow] for dvrow in dv]\n                   for i in range(sd)]\n        dmats = [numpy.dot(vinv, numpy.transpose(dtilde)) for dtilde in dtildes]\n        PolynomialSet.__init__(self, ref_el, degree, embedded_degree,\n                               expansion_set, coeffs, dmats)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/polynomial_set.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, null, 1, null, null, 1, null, null, null, 1, 1, 1, null, 1, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, null, 1, null, null, null, 1, null, null, 1, null, 1, 1, null, null, 1, null, 1, null, 1, 1, null, 1, null, null, 1, null, null, null, null, null, null, 1, 1, 0, null, null, 1, 1, null, 1, null, 1, 1, null, null, 1, 1, null, 1, null, null, 1, null, 1, null, 1, 1, null, 1, null, null, 1, null, null, null, null, null, null, 1, 1, 0, null, null, 1, null, 1, null, null, 1, null, 1, null, 1, 1, null, 1, null, null, 1, null, null, null, null, 1, 1, 1, null, null, 1, null, null, 1, 1, null, 1, null, 1, null, 1, null, 1, null, 1, null, null, 1, null, null, null, null, 1, 1, 1, 1, null, null, 1, null, null, 1, 1, null, 1, null, 1, null, 1, null, 1, null, null, 1, null, null, 1, null, null, null, null, 1, null, null, 1, 1, 1, 1, null, null, 1, 1, 1, null, null, 1, 1, 1, 1, null, 1, null, 1, null, null, 1, 1, null, null, 1, 1, 1, null, 1, 1, null, 1, 1, null, null, 1, null, null, null, null, 1, 0, null, 1, null, 1, 1, null, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, 1, null, null, 1, null, 1, null, 1, null, null, null, 1, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, 1, null, 1, 1, null, 1, null, 1, 1, null, null, 1, 1, null, 1, 1, 1, 1, 1, 1, null, 1, null, null, 1], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n#\n# Modified by Marie E. Rognes (meg@simula.no), 2012\n# Modified by David A. 
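The polynomial_set.py source above represents polynomial sets as coefficient arrays over an expansion set, with ONPolynomialSet as the orthonormal case. A sketch that checks that orthonormality numerically against the collapsed quadrature defined in quadrature.py; the choice of degree and number of quadrature points is an illustrative assumption:

import numpy as np
from FIAT import polynomial_set, quadrature
from FIAT.reference_element import UFCTriangle

tri = UFCTriangle()
P2 = polynomial_set.ONPolynomialSet(tri, 2)        # 6 orthonormal members spanning P2
Q = quadrature.make_quadrature(tri, 4)             # 4 points per direction, exact for these products
vals = P2.tabulate(Q.get_points())[(0, 0)]         # values only (jet order 0)
gram = np.dot(vals * Q.get_weights(), vals.T)      # weighted Gram matrix of the basis
print(np.allclose(gram, np.eye(P2.get_num_members())))   # expected: True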
Ham (david.ham@imperial.ac.uk), 2015\n\nimport itertools\nimport math\nimport numpy\n\nfrom FIAT import reference_element, expansions, jacobi, orthopoly\n\n\nclass QuadratureRule(object):\n    \"\"\"General class that models integration over a reference element\n    as the weighted sum of a function evaluated at a set of points.\"\"\"\n\n    def __init__(self, ref_el, pts, wts):\n        if len(wts) != len(pts):\n            raise ValueError(\"Have %d weights, but %d points\" % (len(wts), len(pts)))\n\n        self.ref_el = ref_el\n        self.pts = pts\n        self.wts = wts\n\n    def get_points(self):\n        return numpy.array(self.pts)\n\n    def get_weights(self):\n        return numpy.array(self.wts)\n\n    def integrate(self, f):\n        return sum([w * f(x) for (x, w) in zip(self.pts, self.wts)])\n\n\nclass GaussJacobiQuadratureLineRule(QuadratureRule):\n    \"\"\"Gauss-Jacobi quadature rule determined by Jacobi weights a and b\n    using m roots of m:th order Jacobi polynomial.\"\"\"\n\n    def __init__(self, ref_el, m):\n        # this gives roots on the default (-1,1) reference element\n        #        (xs_ref, ws_ref) = compute_gauss_jacobi_rule(a, b, m)\n        (xs_ref, ws_ref) = compute_gauss_jacobi_rule(0., 0., m)\n\n        Ref1 = reference_element.DefaultLine()\n        A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),\n                                                     ref_el.get_vertices())\n\n        mapping = lambda x: numpy.dot(A, x) + b\n\n        scale = numpy.linalg.det(A)\n\n        xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref])\n        ws = tuple([scale * w for w in ws_ref])\n\n        QuadratureRule.__init__(self, ref_el, xs, ws)\n\n\nclass GaussLobattoLegendreQuadratureLineRule(QuadratureRule):\n    \"\"\"Implement the Gauss-Lobatto-Legendre quadrature rules on the interval using\n    Greg von Winckel's implementation. This facilitates implementing\n    spectral elements.\n\n    The quadrature rule uses m points for a degree of precision of 2m-3.\n    \"\"\"\n    def __init__(self, ref_el, m):\n        if m < 2:\n            raise ValueError(\n                \"Gauss-Labotto-Legendre quadrature invalid for fewer than 2 points\")\n\n        Ref1 = reference_element.DefaultLine()\n        verts = Ref1.get_vertices()\n\n        if m > 2:\n            # Calculate the recursion coefficients.\n            alpha, beta = orthopoly.rec_jacobi(m, 0, 0)\n            xs_ref, ws_ref = orthopoly.lobatto(alpha, beta, verts[0][0], verts[1][0])\n        else:\n            # Special case for lowest order.\n            xs_ref = [v[0] for v in verts[:]]\n            ws_ref = (0.5 * (xs_ref[1] - xs_ref[0]), ) * 2\n\n        A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),\n                                                     ref_el.get_vertices())\n\n        mapping = lambda x: numpy.dot(A, x) + b\n\n        scale = numpy.linalg.det(A)\n\n        xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref])\n        ws = tuple([scale * w for w in ws_ref])\n\n        QuadratureRule.__init__(self, ref_el, xs, ws)\n\n\nclass GaussLegendreQuadratureLineRule(QuadratureRule):\n    \"\"\"Produce the Gauss--Legendre quadrature rules on the interval using\n    the implementation in numpy. 
This facilitates implementing\n    discontinuous spectral elements.\n\n    The quadrature rule uses m points for a degree of precision of 2m-1.\n    \"\"\"\n    def __init__(self, ref_el, m):\n        if m < 1:\n            raise ValueError(\n                \"Gauss-Legendre quadrature invalid for fewer than 2 points\")\n\n        xs_ref, ws_ref = numpy.polynomial.legendre.leggauss(m)\n\n        A, b = reference_element.make_affine_mapping(((-1.,), (1.)),\n                                                     ref_el.get_vertices())\n\n        mapping = lambda x: numpy.dot(A, x) + b\n\n        scale = numpy.linalg.det(A)\n\n        xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref])\n        ws = tuple([scale * w for w in ws_ref])\n\n        QuadratureRule.__init__(self, ref_el, xs, ws)\n\n\nclass CollapsedQuadratureTriangleRule(QuadratureRule):\n    \"\"\"Implements the collapsed quadrature rules defined in\n    Karniadakis & Sherwin by mapping products of Gauss-Jacobi rules\n    from the square to the triangle.\"\"\"\n\n    def __init__(self, ref_el, m):\n        ptx, wx = compute_gauss_jacobi_rule(0., 0., m)\n        pty, wy = compute_gauss_jacobi_rule(1., 0., m)\n\n        # map ptx , pty\n        pts_ref = [expansions.xi_triangle((x, y))\n                   for x in ptx for y in pty]\n\n        Ref1 = reference_element.DefaultTriangle()\n        A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),\n                                                     ref_el.get_vertices())\n        mapping = lambda x: numpy.dot(A, x) + b\n\n        scale = numpy.linalg.det(A)\n\n        pts = tuple([tuple(mapping(x)) for x in pts_ref])\n\n        wts = [0.5 * scale * w1 * w2 for w1 in wx for w2 in wy]\n\n        QuadratureRule.__init__(self, ref_el, tuple(pts), tuple(wts))\n\n\nclass CollapsedQuadratureTetrahedronRule(QuadratureRule):\n    \"\"\"Implements the collapsed quadrature rules defined in\n    Karniadakis & Sherwin by mapping products of Gauss-Jacobi rules\n    from the cube to the tetrahedron.\"\"\"\n\n    def __init__(self, ref_el, m):\n        ptx, wx = compute_gauss_jacobi_rule(0., 0., m)\n        pty, wy = compute_gauss_jacobi_rule(1., 0., m)\n        ptz, wz = compute_gauss_jacobi_rule(2., 0., m)\n\n        # map ptx , pty\n        pts_ref = [expansions.xi_tetrahedron((x, y, z))\n                   for x in ptx for y in pty for z in ptz]\n\n        Ref1 = reference_element.DefaultTetrahedron()\n        A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),\n                                                     ref_el.get_vertices())\n        mapping = lambda x: numpy.dot(A, x) + b\n\n        scale = numpy.linalg.det(A)\n\n        pts = tuple([tuple(mapping(x)) for x in pts_ref])\n\n        wts = [scale * 0.125 * w1 * w2 * w3\n               for w1 in wx for w2 in wy for w3 in wz]\n\n        QuadratureRule.__init__(self, ref_el, tuple(pts), tuple(wts))\n\n\nclass UFCTetrahedronFaceQuadratureRule(QuadratureRule):\n    \"\"\"Highly specialized quadrature rule for the face of a\n    tetrahedron, mapped from a reference triangle, used for higher\n    order Nedelecs\"\"\"\n\n    def __init__(self, face_number, degree):\n\n        # Create quadrature rule on reference triangle\n        reference_triangle = reference_element.UFCTriangle()\n        reference_rule = make_quadrature(reference_triangle, degree)\n        ref_points = reference_rule.get_points()\n        ref_weights = reference_rule.get_weights()\n\n        # Get geometry information about the face of 
interest\n        reference_tet = reference_element.UFCTetrahedron()\n        face = reference_tet.get_topology()[2][face_number]\n        vertices = reference_tet.get_vertices_of_subcomplex(face)\n\n        # Use tet to map points and weights on the appropriate face\n        vertices = [numpy.array(list(vertex)) for vertex in vertices]\n        x0 = vertices[0]\n        J = numpy.matrix([vertices[1] - x0, vertices[2] - x0]).transpose()\n        x0 = numpy.matrix(x0).transpose()\n        # This is just a very numpyfied way of writing J*p + x0:\n        F = lambda p: \\\n            numpy.array(J*numpy.matrix(p).transpose() + x0).flatten()\n        points = numpy.array([F(p) for p in ref_points])\n\n        # Map weights: multiply reference weights by sqrt(|J^T J|)\n        detJTJ = numpy.linalg.det(J.transpose() * J)\n        weights = numpy.sqrt(detJTJ) * ref_weights\n\n        # Initialize super class with new points and weights\n        QuadratureRule.__init__(self, reference_tet, points, weights)\n        self._reference_rule = reference_rule\n        self._J = J\n\n    def reference_rule(self):\n        return self._reference_rule\n\n    def jacobian(self):\n        return self._J\n\n\ndef make_quadrature(ref_el, m):\n    \"\"\"Returns the collapsed quadrature rule using m points per\n    direction on the given reference element. In the tensor product\n    case, m is a tuple.\"\"\"\n\n    if isinstance(m, tuple):\n        min_m = min(m)\n    else:\n        min_m = m\n\n    msg = \"Expecting at least one (not %d) quadrature point per direction\" % min_m\n    assert (min_m > 0), msg\n\n    if ref_el.get_shape() == reference_element.POINT:\n        return QuadratureRule(ref_el, [()], [1])\n    elif ref_el.get_shape() == reference_element.LINE:\n        return GaussJacobiQuadratureLineRule(ref_el, m)\n    elif ref_el.get_shape() == reference_element.TRIANGLE:\n        return CollapsedQuadratureTriangleRule(ref_el, m)\n    elif ref_el.get_shape() == reference_element.TETRAHEDRON:\n        return CollapsedQuadratureTetrahedronRule(ref_el, m)\n\n\ndef make_tensor_product_quadrature(*quad_rules):\n    \"\"\"Returns the quadrature rule for a TensorProduct cell, by combining\n    the quadrature rules of the components.\"\"\"\n    ref_el = reference_element.TensorProductCell(*[q.ref_el\n                                                   for q in quad_rules])\n    # Coordinates are \"concatenated\", weights are multiplied\n    pts = [list(itertools.chain(*pt_tuple))\n           for pt_tuple in itertools.product(*[q.pts for q in quad_rules])]\n    wts = [numpy.prod(wt_tuple)\n           for wt_tuple in itertools.product(*[q.wts for q in quad_rules])]\n    return QuadratureRule(ref_el, pts, wts)\n\n\n# rule to get Gauss-Jacobi points\ndef compute_gauss_jacobi_points(a, b, m):\n    \"\"\"Computes the m roots of P_{m}^{a,b} on [-1,1] by Newton's method.\n    The initial guesses are the Chebyshev points.  
Algorithm\n    implemented in Python from the pseudocode given by Karniadakis and\n    Sherwin\"\"\"\n    x = []\n    eps = 1.e-8\n    max_iter = 100\n    for k in range(0, m):\n        r = -math.cos((2.0 * k + 1.0) * math.pi / (2.0 * m))\n        if k > 0:\n            r = 0.5 * (r + x[k - 1])\n        j = 0\n        delta = 2 * eps\n        while j < max_iter:\n            s = 0\n            for i in range(0, k):\n                s = s + 1.0 / (r - x[i])\n            f = jacobi.eval_jacobi(a, b, m, r)\n            fp = jacobi.eval_jacobi_deriv(a, b, m, r)\n            delta = f / (fp - f * s)\n\n            r = r - delta\n\n            if math.fabs(delta) < eps:\n                break\n            else:\n                j = j + 1\n\n        x.append(r)\n    return x\n\n\ndef compute_gauss_jacobi_rule(a, b, m):\n    xs = compute_gauss_jacobi_points(a, b, m)\n\n    a1 = math.pow(2, a + b + 1)\n    a2 = math.gamma(a + m + 1)\n    a3 = math.gamma(b + m + 1)\n    a4 = math.gamma(a + b + m + 1)\n    a5 = math.factorial(m)\n    a6 = a1 * a2 * a3 / a4 / a5\n\n    ws = [a6 / (1.0 - x**2.0) / jacobi.eval_jacobi_deriv(a, b, m, x)**2.0\n          for x in xs]\n\n    return xs, ws\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/quadrature.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, 1, null, null, 1, null, null, 1, null, 1, null, 1, null, null, null, 1, null, null, 1, null, 1, 1, null, 1, null, 0, null, 1, null, null, null, 1, 1, null, null, 1, 1, null, null, 1, 1, null, null, 1, 1, 1, null, 1, null, null, 0], "source": "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2007-2016 Kristian B. Oelgaard\n# Copyright (C) 2017 Mikl\u00f3s Homolya\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n#\n# Modified by Garth N. 
Wells 2006-2009\n\nimport numpy\n\nfrom FIAT.dual_set import DualSet\nfrom FIAT.finite_element import FiniteElement\nfrom FIAT.functional import PointEvaluation\n\n\nclass QuadratureElement(FiniteElement):\n    \"\"\"A set of quadrature points pretending to be a finite element.\"\"\"\n\n    def __init__(self, ref_el, points):\n        # Create entity dofs.\n        entity_dofs = {dim: {entity: [] for entity in entities}\n                       for dim, entities in ref_el.get_topology().items()}\n        entity_dofs[ref_el.get_dimension()] = {0: list(range(len(points)))}\n\n        # The dual nodes are PointEvaluations at the quadrature points.\n        # FIXME: KBO: Check if this gives expected results for code like evaluate_dof.\n        nodes = [PointEvaluation(ref_el, tuple(point)) for point in points]\n\n        # Construct the dual set\n        dual = DualSet(nodes, ref_el, entity_dofs)\n\n        super(QuadratureElement, self).__init__(ref_el, dual, order=None)\n        self._points = points  # save the quadrature points\n\n    def value_shape(self):\n        \"The QuadratureElement is scalar valued\"\n        return ()\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Return the identity matrix of size (num_quad_points, num_quad_points),\n        in a format that monomialintegration and monomialtabulation understands.\"\"\"\n\n        if entity is not None and entity != (self.ref_el.get_dimension(), 0):\n            raise ValueError('QuadratureElement does not \"tabulate\" on subentities.')\n\n        # Derivatives are not defined on a QuadratureElement\n        if order:\n            raise ValueError(\"Derivatives are not defined on a QuadratureElement.\")\n\n        # Check that incoming points are equal to the quadrature points.\n        if len(points) != len(self._points) or abs(numpy.array(points) - self._points).max() > 1e-12:\n            raise AssertionError(\"Mismatch of quadrature points!\")\n\n        # Return the identity matrix of size len(self._points).\n        values = numpy.eye(len(self._points))\n        dim = self.ref_el.get_spatial_dimension()\n        return {(0,) * dim: values}\n\n    @staticmethod\n    def is_nodal():\n        # No polynomial basis, but still nodal.\n        return True\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/quadrature_element.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, 1, 1, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, null, 1, 1, null, 1, null, 1, 1, null, 1, 1, null, 1, null, null, 1, 1, 1, 1, null, 1, 1, 1, null, 0, null, null, 1, null, null, null, 1, null, null, 1, null, null, 1, null, null, null, 1, null, 1, 1, 1, null, 1, null, null, 1, 1, 1, null, 1, null, null, null, null, null, 1, 1, 1, null, 1, null, null, null, null, null, 1, 1, 1, 1, 1, null, 1, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, null, 1, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, null, null, 1, null, null, 1, null, null, 1, null, null, null, 1, null, 1, 1, 1, null, 1, 1, null, null, null, 1, 1, 1, null, null, 1, null, null, null, null, 1, 1, 1, 1, 1, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, null, null, null, 
1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, 1], "source": "\"\"\"Quadrature schemes on cells\n\nThis module generates quadrature schemes on reference cells that integrate\nexactly a polynomial of a given degree using a specified scheme.\n\nScheme options are:\n\n  scheme=\"default\"\n\n  scheme=\"canonical\" (collapsed Gauss scheme)\n\nBackground on the schemes:\n\n  Keast rules for tetrahedra:\n    Keast, P. Moderate-degree tetrahedral quadrature formulas, Computer\n    Methods in Applied Mechanics and Engineering 55(3):339-348, 1986.\n    http://dx.doi.org/10.1016/0045-7825(86)90059-9\n\"\"\"\n\n# Copyright (C) 2011 Garth N. Wells\n# Copyright (C) 2016 Miklos Homolya\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n#\n# First added:  2011-04-19\n# Last changed: 2011-04-19\n\n# NumPy\nfrom numpy import array, arange, float64\n\n# FIAT\nfrom FIAT.reference_element import QUADRILATERAL, HEXAHEDRON, TENSORPRODUCT, UFCTriangle, UFCTetrahedron\nfrom FIAT.quadrature import QuadratureRule, make_quadrature, make_tensor_product_quadrature\n\n\ndef create_quadrature(ref_el, degree, scheme=\"default\"):\n    \"\"\"\n    Generate quadrature rule for given reference element\n    that will integrate an polynomial of order 'degree' exactly.\n\n    For low-degree (<=6) polynomials on triangles and tetrahedra, this\n    uses hard-coded rules, otherwise it falls back to a collapsed\n    Gauss scheme on simplices.  
On tensor-product cells, it is a\n    tensor-product quadrature rule of the subcells.\n\n    :arg cell: The FIAT cell to create the quadrature for.\n    :arg degree: The degree of polynomial that the rule should\n        integrate exactly.\n    \"\"\"\n    if ref_el.get_shape() == TENSORPRODUCT:\n        try:\n            degree = tuple(degree)\n        except TypeError:\n            degree = (degree,) * len(ref_el.cells)\n\n        assert len(ref_el.cells) == len(degree)\n        quad_rules = [create_quadrature(c, d, scheme)\n                      for c, d in zip(ref_el.cells, degree)]\n        return make_tensor_product_quadrature(*quad_rules)\n\n    if ref_el.get_shape() in [QUADRILATERAL, HEXAHEDRON]:\n        return create_quadrature(ref_el.product, degree, scheme)\n\n    if degree < 0:\n        raise ValueError(\"Need positive degree, not %d\" % degree)\n\n    if scheme == \"default\":\n        # TODO: Point transformation to support good schemes on\n        # non-UFC reference elements.\n        if isinstance(ref_el, UFCTriangle):\n            return _triangle_scheme(degree)\n        elif isinstance(ref_el, UFCTetrahedron):\n            return _tetrahedron_scheme(degree)\n        else:\n            return _fiat_scheme(ref_el, degree)\n    elif scheme == \"canonical\":\n        return _fiat_scheme(ref_el, degree)\n    else:\n        raise ValueError(\"Unknown quadrature scheme: %s.\" % scheme)\n\n\ndef _fiat_scheme(ref_el, degree):\n    \"\"\"Get quadrature scheme from FIAT interface\"\"\"\n\n    # Number of points per axis for exact integration\n    num_points_per_axis = (degree + 1 + 1) // 2\n\n    # Create and return FIAT quadrature rule\n    return make_quadrature(ref_el, num_points_per_axis)\n\n\ndef _triangle_scheme(degree):\n    \"\"\"Return a quadrature scheme on a triangle of specified order. 
Falls\n    back on canonical rule for higher orders.\"\"\"\n\n    if degree == 0 or degree == 1:\n        # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1\n        x = array([[1.0/3.0, 1.0/3.0]])\n        w = array([0.5])\n    elif degree == 2:\n        # Scheme from Strang and Fix, 3 points, degree of precision 2\n        x = array([[1.0/6.0, 1.0/6.0],\n                   [1.0/6.0, 2.0/3.0],\n                   [2.0/3.0, 1.0/6.0]])\n        w = arange(3, dtype=float64)\n        w[:] = 1.0/6.0\n    elif degree == 3:\n        # Scheme from Strang and Fix, 6 points, degree of precision 3\n        x = array([[0.659027622374092, 0.231933368553031],\n                   [0.659027622374092, 0.109039009072877],\n                   [0.231933368553031, 0.659027622374092],\n                   [0.231933368553031, 0.109039009072877],\n                   [0.109039009072877, 0.659027622374092],\n                   [0.109039009072877, 0.231933368553031]])\n        w = arange(6, dtype=float64)\n        w[:] = 1.0/12.0\n    elif degree == 4:\n        # Scheme from Strang and Fix, 6 points, degree of precision 4\n        x = array([[0.816847572980459, 0.091576213509771],\n                   [0.091576213509771, 0.816847572980459],\n                   [0.091576213509771, 0.091576213509771],\n                   [0.108103018168070, 0.445948490915965],\n                   [0.445948490915965, 0.108103018168070],\n                   [0.445948490915965, 0.445948490915965]])\n        w = arange(6, dtype=float64)\n        w[0:3] = 0.109951743655322\n        w[3:6] = 0.223381589678011\n        w = w/2.0\n    elif degree == 5:\n        # Scheme from Strang and Fix, 7 points, degree of precision 5\n        x = array([[0.33333333333333333, 0.33333333333333333],\n                   [0.79742698535308720, 0.10128650732345633],\n                   [0.10128650732345633, 0.79742698535308720],\n                   [0.10128650732345633, 0.10128650732345633],\n                   [0.05971587178976981, 0.47014206410511505],\n                   [0.47014206410511505, 0.05971587178976981],\n                   [0.47014206410511505, 0.47014206410511505]])\n        w = arange(7, dtype=float64)\n        w[0] = 0.22500000000000000\n        w[1:4] = 0.12593918054482717\n        w[4:7] = 0.13239415278850616\n        w = w/2.0\n    elif degree == 6:\n        # Scheme from Strang and Fix, 12 points, degree of precision 6\n        x = array([[0.873821971016996, 0.063089014491502],\n                   [0.063089014491502, 0.873821971016996],\n                   [0.063089014491502, 0.063089014491502],\n                   [0.501426509658179, 0.249286745170910],\n                   [0.249286745170910, 0.501426509658179],\n                   [0.249286745170910, 0.249286745170910],\n                   [0.636502499121399, 0.310352451033785],\n                   [0.636502499121399, 0.053145049844816],\n                   [0.310352451033785, 0.636502499121399],\n                   [0.310352451033785, 0.053145049844816],\n                   [0.053145049844816, 0.636502499121399],\n                   [0.053145049844816, 0.310352451033785]])\n        w = arange(12, dtype=float64)\n        w[0:3] = 0.050844906370207\n        w[3:6] = 0.116786275726379\n        w[6:12] = 0.082851075618374\n        w = w/2.0\n    else:\n        # Get canonical scheme\n        return _fiat_scheme(UFCTriangle(), degree)\n\n    # Return scheme\n    return QuadratureRule(UFCTriangle(), x, w)\n\n\ndef _tetrahedron_scheme(degree):\n    \"\"\"Return a quadrature 
scheme on a tetrahedron of specified\n    degree. Falls back on canonical rule for higher orders\"\"\"\n\n    if degree == 0 or degree == 1:\n        # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1\n        x = array([[1.0/4.0, 1.0/4.0, 1.0/4.0]])\n        w = array([1.0/6.0])\n    elif degree == 2:\n        # Scheme from Zienkiewicz and Taylor, 4 points, degree of precision 2\n        a, b = 0.585410196624969, 0.138196601125011\n        x = array([[a, b, b],\n                   [b, a, b],\n                   [b, b, a],\n                   [b, b, b]])\n        w = arange(4, dtype=float64)\n        w[:] = 1.0/24.0\n    elif degree == 3:\n        # Scheme from Zienkiewicz and Taylor, 5 points, degree of precision 3\n        # Note: this scheme has a negative weight\n        x = array([[0.2500000000000000, 0.2500000000000000, 0.2500000000000000],\n                   [0.5000000000000000, 0.1666666666666666, 0.1666666666666666],\n                   [0.1666666666666666, 0.5000000000000000, 0.1666666666666666],\n                   [0.1666666666666666, 0.1666666666666666, 0.5000000000000000],\n                   [0.1666666666666666, 0.1666666666666666, 0.1666666666666666]])\n        w = arange(5, dtype=float64)\n        w[0] = -0.8\n        w[1:5] = 0.45\n        w = w/6.0\n    elif degree == 4:\n        # Keast rule, 14 points, degree of precision 4\n        # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html\n        # (KEAST5)\n        x = array([[0.0000000000000000, 0.5000000000000000, 0.5000000000000000],\n                   [0.5000000000000000, 0.0000000000000000, 0.5000000000000000],\n                   [0.5000000000000000, 0.5000000000000000, 0.0000000000000000],\n                   [0.5000000000000000, 0.0000000000000000, 0.0000000000000000],\n                   [0.0000000000000000, 0.5000000000000000, 0.0000000000000000],\n                   [0.0000000000000000, 0.0000000000000000, 0.5000000000000000],\n                   [0.6984197043243866, 0.1005267652252045, 0.1005267652252045],\n                   [0.1005267652252045, 0.1005267652252045, 0.1005267652252045],\n                   [0.1005267652252045, 0.1005267652252045, 0.6984197043243866],\n                   [0.1005267652252045, 0.6984197043243866, 0.1005267652252045],\n                   [0.0568813795204234, 0.3143728734931922, 0.3143728734931922],\n                   [0.3143728734931922, 0.3143728734931922, 0.3143728734931922],\n                   [0.3143728734931922, 0.3143728734931922, 0.0568813795204234],\n                   [0.3143728734931922, 0.0568813795204234, 0.3143728734931922]])\n        w = arange(14, dtype=float64)\n        w[0:6] = 0.0190476190476190\n        w[6:10] = 0.0885898247429807\n        w[10:14] = 0.1328387466855907\n        w = w/6.0\n    elif degree == 5:\n        # Keast rule, 15 points, degree of precision 5\n        # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html\n        # (KEAST6)\n        x = array([[0.2500000000000000, 0.2500000000000000, 0.2500000000000000],\n                   [0.0000000000000000, 0.3333333333333333, 0.3333333333333333],\n                   [0.3333333333333333, 0.3333333333333333, 0.3333333333333333],\n                   [0.3333333333333333, 0.3333333333333333, 0.0000000000000000],\n                   [0.3333333333333333, 0.0000000000000000, 0.3333333333333333],\n                   [0.7272727272727273, 0.0909090909090909, 
0.0909090909090909],\n                   [0.0909090909090909, 0.0909090909090909, 0.0909090909090909],\n                   [0.0909090909090909, 0.0909090909090909, 0.7272727272727273],\n                   [0.0909090909090909, 0.7272727272727273, 0.0909090909090909],\n                   [0.4334498464263357, 0.0665501535736643, 0.0665501535736643],\n                   [0.0665501535736643, 0.4334498464263357, 0.0665501535736643],\n                   [0.0665501535736643, 0.0665501535736643, 0.4334498464263357],\n                   [0.0665501535736643, 0.4334498464263357, 0.4334498464263357],\n                   [0.4334498464263357, 0.0665501535736643, 0.4334498464263357],\n                   [0.4334498464263357, 0.4334498464263357, 0.0665501535736643]])\n        w = arange(15, dtype=float64)\n        w[0] = 0.1817020685825351\n        w[1:5] = 0.0361607142857143\n        w[5:9] = 0.0698714945161738\n        w[9:15] = 0.0656948493683187\n        w = w/6.0\n    elif degree == 6:\n        # Keast rule, 24 points, degree of precision 6\n        # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html\n        # (KEAST7)\n        x = array([[0.3561913862225449, 0.2146028712591517, 0.2146028712591517],\n                   [0.2146028712591517, 0.2146028712591517, 0.2146028712591517],\n                   [0.2146028712591517, 0.2146028712591517, 0.3561913862225449],\n                   [0.2146028712591517, 0.3561913862225449, 0.2146028712591517],\n                   [0.8779781243961660, 0.0406739585346113, 0.0406739585346113],\n                   [0.0406739585346113, 0.0406739585346113, 0.0406739585346113],\n                   [0.0406739585346113, 0.0406739585346113, 0.8779781243961660],\n                   [0.0406739585346113, 0.8779781243961660, 0.0406739585346113],\n                   [0.0329863295731731, 0.3223378901422757, 0.3223378901422757],\n                   [0.3223378901422757, 0.3223378901422757, 0.3223378901422757],\n                   [0.3223378901422757, 0.3223378901422757, 0.0329863295731731],\n                   [0.3223378901422757, 0.0329863295731731, 0.3223378901422757],\n                   [0.2696723314583159, 0.0636610018750175, 0.0636610018750175],\n                   [0.0636610018750175, 0.2696723314583159, 0.0636610018750175],\n                   [0.0636610018750175, 0.0636610018750175, 0.2696723314583159],\n                   [0.6030056647916491, 0.0636610018750175, 0.0636610018750175],\n                   [0.0636610018750175, 0.6030056647916491, 0.0636610018750175],\n                   [0.0636610018750175, 0.0636610018750175, 0.6030056647916491],\n                   [0.0636610018750175, 0.2696723314583159, 0.6030056647916491],\n                   [0.2696723314583159, 0.6030056647916491, 0.0636610018750175],\n                   [0.6030056647916491, 0.0636610018750175, 0.2696723314583159],\n                   [0.0636610018750175, 0.6030056647916491, 0.2696723314583159],\n                   [0.2696723314583159, 0.0636610018750175, 0.6030056647916491],\n                   [0.6030056647916491, 0.2696723314583159, 0.0636610018750175]])\n        w = arange(24, dtype=float64)\n        w[0:4] = 0.0399227502581679\n        w[4:8] = 0.0100772110553207\n        w[8:12] = 0.0553571815436544\n        w[12:24] = 0.0482142857142857\n        w = w/6.0\n    else:\n        # Get canonical scheme\n        return _fiat_scheme(UFCTetrahedron(), degree)\n\n    # Return scheme\n    return QuadratureRule(UFCTetrahedron(), x, w)\n", "name": 
"/home/fenics/local/lib/python3.5/site-packages/FIAT/quadrature_schemes.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, 1, 1, null, null, 1, null, null, 1, null, 1, null, 1, 1, 1, null, 1, null, 1, null, 1, 1, null, 1, null, null, null, 1, 1, null, 1, null, 1, 1, null, 1, null, null, null, 1, 1, 1, 1, null, 1, null, null, null, null, null, null, 1, null, null, 1, null, null, null, null, 1, 1, 1, null, 1, 1, null, null, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, null, 1, null, null, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, null, 1, null, 1, null, null, 1, null, null, 1, null, 1, 1, 1, 1, 1, null], "source": "# Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University)\n# Modified by Andrew T. T. McRae (Imperial College London)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT import (expansions, polynomial_set, quadrature, dual_set,\n                  finite_element, functional)\nimport numpy\nfrom itertools import chain\n\n\ndef RTSpace(ref_el, deg):\n    \"\"\"Constructs a basis for the the Raviart-Thomas space\n    (P_k)^d + P_k x\"\"\"\n    sd = ref_el.get_spatial_dimension()\n\n    vec_Pkp1 = polynomial_set.ONPolynomialSet(ref_el, deg + 1, (sd,))\n\n    dimPkp1 = expansions.polynomial_dimension(ref_el, deg + 1)\n    dimPk = expansions.polynomial_dimension(ref_el, deg)\n    dimPkm1 = expansions.polynomial_dimension(ref_el, deg - 1)\n\n    vec_Pk_indices = list(chain(*(range(i * dimPkp1, i * dimPkp1 + dimPk)\n                                  for i in range(sd))))\n    vec_Pk_from_Pkp1 = vec_Pkp1.take(vec_Pk_indices)\n\n    Pkp1 = polynomial_set.ONPolynomialSet(ref_el, deg + 1)\n    PkH = Pkp1.take(list(range(dimPkm1, dimPk)))\n\n    Q = quadrature.make_quadrature(ref_el, 2 * deg + 2)\n\n    # have to work on this through \"tabulate\" interface\n    # first, tabulate PkH at quadrature points\n    Qpts = numpy.array(Q.get_points())\n    Qwts = numpy.array(Q.get_weights())\n\n    zero_index = tuple([0 for i in range(sd)])\n\n    PkH_at_Qpts = PkH.tabulate(Qpts)[zero_index]\n    Pkp1_at_Qpts = Pkp1.tabulate(Qpts)[zero_index]\n\n    PkHx_coeffs = numpy.zeros((PkH.get_num_members(),\n                               sd,\n                               Pkp1.get_num_members()), \"d\")\n\n    for i in range(PkH.get_num_members()):\n        for j in range(sd):\n            fooij = PkH_at_Qpts[i, :] * Qpts[:, j] * Qwts\n            PkHx_coeffs[i, j, :] = numpy.dot(Pkp1_at_Qpts, fooij)\n\n    PkHx = polynomial_set.PolynomialSet(ref_el,\n                                        deg,\n                                        deg + 1,\n                                        vec_Pkp1.get_expansion_set(),\n                                        PkHx_coeffs,\n                            
            vec_Pkp1.get_dmats())\n\n    return polynomial_set.polynomial_set_union_normalized(vec_Pk_from_Pkp1, PkHx)\n\n\nclass RTDualSet(dual_set.DualSet):\n    \"\"\"Dual basis for Raviart-Thomas elements consisting of point\n    evaluation of normals on facets of codimension 1 and internal\n    moments against polynomials\"\"\"\n\n    def __init__(self, ref_el, degree):\n        entity_ids = {}\n        nodes = []\n\n        sd = ref_el.get_spatial_dimension()\n        t = ref_el.get_topology()\n\n        # codimension 1 facets\n        for i in range(len(t[sd - 1])):\n            pts_cur = ref_el.make_points(sd - 1, i, sd + degree)\n            for j in range(len(pts_cur)):\n                pt_cur = pts_cur[j]\n                f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)\n                nodes.append(f)\n\n        # internal nodes.  Let's just use points at a lattice\n        if degree > 0:\n            cpe = functional.ComponentPointEvaluation\n            pts = ref_el.make_points(sd, 0, degree + sd)\n            for d in range(sd):\n                for i in range(len(pts)):\n                    l_cur = cpe(ref_el, d, (sd,), pts[i])\n                    nodes.append(l_cur)\n\n            # Q = quadrature.make_quadrature(ref_el, 2 * ( degree + 1 ))\n            # qpts = Q.get_points()\n            # Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1)\n            # zero_index = tuple([0 for i in range(sd)])\n            # Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index]\n\n            # for d in range(sd):\n            #     for i in range(Pkm1_at_qpts.shape[0]):\n            #         phi_cur = Pkm1_at_qpts[i, :]\n            #         l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,))\n            #         nodes.append(l_cur)\n\n        # sets vertices (and in 3d, edges) to have no nodes\n        for i in range(sd - 1):\n            entity_ids[i] = {}\n            for j in range(len(t[i])):\n                entity_ids[i][j] = []\n\n        cur = 0\n\n        # set codimension 1 (edges 2d, faces 3d) dof\n        pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree)\n        pts_per_facet = len(pts_facet_0)\n        entity_ids[sd - 1] = {}\n        for i in range(len(t[sd - 1])):\n            entity_ids[sd - 1][i] = list(range(cur, cur + pts_per_facet))\n            cur += pts_per_facet\n\n        # internal nodes, if applicable\n        entity_ids[sd] = {0: []}\n        if degree > 0:\n            num_internal_nodes = expansions.polynomial_dimension(ref_el,\n                                                                 degree - 1)\n            entity_ids[sd][0] = list(range(cur, cur + num_internal_nodes * sd))\n\n        super(RTDualSet, self).__init__(nodes, ref_el, entity_ids)\n\n\nclass RaviartThomas(finite_element.CiarletElement):\n    \"\"\"The Raviart-Thomas finite element\"\"\"\n\n    def __init__(self, ref_el, q):\n\n        degree = q - 1\n        poly_set = RTSpace(ref_el, degree)\n        dual = RTDualSet(ref_el, degree)\n        formdegree = ref_el.get_spatial_dimension() - 1  # (n-1)-form\n        super(RaviartThomas, self).__init__(poly_set, dual, degree, formdegree,\n                                            mapping=\"contravariant piola\")\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/raviart_thomas.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, 1, 
1, 1, 1, 1, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, null, 1, 0, 1, 1, 1, null, 1, 1, 1, null, null, 1, null, null, null, null, null, null, null, null, 1, 1, null, 1, 1, null, null, 1, null, null, 1, null, null, null, null, null, null, null, null, 1, 0, null, null, null, null, null, null, 1, 1, null, 1, null, 1, null, 1, null, 1, null, 1, null, null, 1, null, null, null, null, 1, null, null, null, null, null, null, null, 1, 1, 1, null, null, null, 1, 1, 1, null, 1, 1, 1, null, 1, 1, 1, 1, null, null, 1, null, null, 1, 1, null, null, null, 1, 1, null, 1, 1, 1, 1, 1, null, 1, null, null, 1, null, 1, 1, null, 1, 0, null, 1, 1, null, 1, null, 1, null, 1, null, null, 1, null, 1, null, 1, null, 1, null, null, null, null, 1, null, 1, null, null, null, null, null, 1, null, 1, null, null, 1, null, 1, null, null, null, null, 0, null, 1, null, null, null, null, null, 0, null, 1, null, null, null, null, null, null, 0, null, null, 1, null, null, 1, null, null, 1, 1, 1, 1, 1, null, null, null, null, 1, null, 1, null, null, 1, 1, null, null, 1, null, 1, 1, 1, null, null, null, 1, null, 1, null, 1, null, null, 1, 1, null, null, null, 1, null, 1, null, 1, 0, null, null, null, 1, null, null, 1, 1, 1, 1, 0, 1, 1, null, null, 1, null, null, 1, 1, null, 1, null, 1, null, null, null, null, 1, 1, 1, 1, null, 1, null, null, null, null, 0, 0, null, 1, null, null, 1, 1, 1, null, 1, null, 0, 0, null, 1, null, null, 1, 0, 1, 1, null, 1, null, 1, null, null, null, null, 1, 1, null, 1, 1, 1, 1, 1, null, 1, null, null, null, 1, 1, 1, 1, null, null, 1, 1, 1, null, 0, null, 1, null, null, 1, null, 1, 1, 1, null, 1, null, null, 1, 1, 1, null, 1, null, 1, 1, 1, null, 1, null, null, null, null, null, null, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, 1, null, 1, 1, 0, null, 0, 0, null, 1, null, 1, null, null, 1, null, 1, null, 1, 1, 1, null, null, 1, null, 1, null, 1, 1, null, 1, null, 1, null, 1, null, 1, null, null, 1, null, null, null, 1, null, null, 1, null, 1, 0, 0, null, 1, null, null, null, null, null, 1, null, 1, null, null, 0, 0, 0, 0, null, null, 1, null, null, 1, 1, 1, 1, null, null, 1, null, null, 1, 1, 1, 1, null, 1, null, 1, 0, null, null, 1, null, null, 1, 1, 1, 1, null, 1, null, null, 1, null, null, null, 1, 1, 1, null, null, 1, 1, null, 1, null, 1, 0, null, null, 1, null, null, null, 1, 1, 1, 1, 1, null, 1, null, 1, null, 1, 1, 1, null, null, 1, null, null, 1, 0, 0, null, null, 0, 0, null, 0, null, 1, null, null, 0, null, null, 1, null, null, null, 1, 1, null, 1, null, null, null, 1, null, null, null, null, null, 1, null, null, null, 1, 1, 1, null, 1, 0, null, null, 1, null, null, null, 1, 0, 0, null, null, null, 0, null, null, null, null, null, 0, null, null, null, 0, 0, 0, null, 1, 0, null, null, 1, null, null, null, 1, 1, 1, null, null, null, 1, null, null, null, null, null, 1, null, null, null, 1, 1, 1, null, 1, null, 1, 1, 1, null, null, 1, null, null, 1, null, 1, null, null, null, null, 1, 1, 1, null, 1, 1, null, 1, 1, null, 1, 1, null, 1, null, null, 1, 1, null, 1, 1, null, 1, null, 1, 1, 1, 1, 1, null, null, 1, null, null, 1, null, 1, null, null, null, null, null, 1, null, null, 1, null, null, null, null, null, null, null, 1, null, 1, null, null, 1, null, null, 1, null, 1, 1, null, 1, null, 1, null, 1, null, 1, null, null, 1, 1, 1, null, 1, 1, 1, 1, null, 1, 1, null, 1, null, null, 0, 0, 0, 0, null, null, null, null, null, 1, null, null, null, 1, 1, 1, null, 1, 1, null, 1, null, 1, 1, null, 1, null, null, 1, null, 1, null, null, null, null, null, 1, 1, 0, 0, 0, 0, null, 0, null, 1, 
null, null, null, null, null, null, 0, 0, null, 1, null, 1, null, 1, null, 1, 1, 1, null, 1, null, null, 0, null, null, 1, null, null, null, null, 1, 1, 1, null, 1, 1, null, 1, null, 1, 1, null, 1, null, null, 0, null, 1, null, null, null, null, null, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, null, 1, null, null, null, null, null, null, 0, 0, null, 1, null, 1, null, 1, null, 1, 1, 1, null, 1, null, null, 0, null, null, 1, null, null, null, 1, 1, null, 1, 0, null, null, null, null, 1, 1, null, null, 1, null, 1, 1, 1, 1, 1, 1, null, 1, null, 1, null, 1, 1, null, 1, null, null, 1, null, null, 0, 0, 0, 0, 0, 0, null, 0, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, null, 0, null, null, 1, null, null, null, 0, 0, null, 0, null, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, null, null, 1, null, null, null, 1, 1, 1, null, 1, null, null, null, null, null, 1, null, null, null, null, 1, null, 1, null, null, 1, null, null, null, null, null, 1, 1, null, 1, null, null, 1, null, null, 1, 1, 1, 1, null, 1, null, null, null, 1, null, null, 1, 1, null, 1, 1, 1, 1, 1, null, 1], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\n#\n# Modified by David A. Ham (david.ham@imperial.ac.uk), 2014\n# Modified by Lizao Li (lzlarryli@gmail.com), 2016\n\"\"\"\nAbstract class and particular implementations of finite element\nreference simplex geometry/topology.\n\nProvides an abstract base class and particular implementations for the\nreference simplex geometry and topology.\nThe rest of FIAT is abstracted over this module so that different\nreference element geometry (e.g. a vertex at (0,0) versus at (-1,-1))\nand orderings of entities have a single point of entry.\n\nCurrently implemented are UFC and Default Line, Triangle and Tetrahedron.\n\"\"\"\nfrom itertools import chain, product, count\nfrom functools import reduce\nfrom collections import defaultdict\nimport operator\nfrom math import factorial\n\nimport numpy\n\n\nPOINT = 0\nLINE = 1\nTRIANGLE = 2\nTETRAHEDRON = 3\nQUADRILATERAL = 11\nHEXAHEDRON = 111\nTENSORPRODUCT = 99\n\n\ndef lattice_iter(start, finish, depth):\n    \"\"\"Generator iterating over the depth-dimensional lattice of\n    integers between start and (finish-1).  This works on simplices in\n    1d, 2d, 3d, and beyond\"\"\"\n    if depth == 0:\n        return\n    elif depth == 1:\n        for ii in range(start, finish):\n            yield [ii]\n    else:\n        for ii in range(start, finish):\n            for jj in lattice_iter(start, finish - ii, depth - 1):\n                yield jj + [ii]\n\n\ndef make_lattice(verts, n, interior=0):\n    \"\"\"Constructs a lattice of points on the simplex defined by verts.\n    For example, the 1:st order lattice will be just the vertices.\n    The optional argument interior specifies how many points from\n    the boundary to omit.  
For example, on a line with n = 2,\n    and interior = 0, this function will return the vertices and\n    midpoint, but with interior = 1, it will only return the\n    midpoint.\"\"\"\n\n    vs = numpy.array(verts)\n    hs = (vs - vs[0])[1:, :] / n\n\n    m = hs.shape[0]\n    result = [tuple(vs[0] + numpy.array(indices).dot(hs))\n              for indices in lattice_iter(interior, n + 1 - interior, m)]\n\n    return result\n\n\ndef linalg_subspace_intersection(A, B):\n    \"\"\"Computes the intersection of the subspaces spanned by the\n    columns of 2-dimensional arrays A,B using the algorithm found in\n    Golub and van Loan (3rd ed) p. 604.  A should be in\n    R^{m,p} and B should be in R^{m,q}.  Returns an orthonormal basis\n    for the intersection of the spaces, stored in the columns of\n    the result.\"\"\"\n\n    # check that vectors are in same space\n    if A.shape[0] != B.shape[0]:\n        raise Exception(\"Dimension error\")\n\n    # A,B are matrices of column vectors\n    # compute the intersection of span(A) and span(B)\n\n    # Compute the principal vectors/angles between the subspaces, G&vL\n    # p.604\n    (qa, _ra) = numpy.linalg.qr(A)\n    (qb, _rb) = numpy.linalg.qr(B)\n\n    C = numpy.dot(numpy.transpose(qa), qb)\n\n    (y, c, _zt) = numpy.linalg.svd(C)\n\n    U = numpy.dot(qa, y)\n\n    rank_c = len([s for s in c if numpy.abs(1.0 - s) < 1.e-10])\n\n    return U[:, :rank_c]\n\n\nclass Cell(object):\n    \"\"\"Abstract class for a reference cell.  Provides accessors for\n    geometry (vertex coordinates) as well as topology (orderings of\n    vertices that make up edges, facecs, etc.\"\"\"\n\n    def __init__(self, shape, vertices, topology):\n        \"\"\"The constructor takes a shape code, the physical vertices expressed\n        as a list of tuples of numbers, and the topology of a cell.\n\n        The topology is stored as a dictionary of dictionaries t[i][j]\n        where i is the dimension and j is the index of the facet of\n        that dimension.  
The result is a list of the vertices\n        comprising the facet.\"\"\"\n        self.shape = shape\n        self.vertices = vertices\n        self.topology = topology\n\n        # Given the topology, work out for each entity in the cell,\n        # which other entities it contains.\n        self.sub_entities = {}\n        for dim, entities in topology.items():\n            self.sub_entities[dim] = {}\n\n            for e, v in entities.items():\n                vertices = frozenset(v)\n                sub_entities = []\n\n                for dim_, entities_ in topology.items():\n                    for e_, vertices_ in entities_.items():\n                        if vertices.issuperset(vertices_):\n                            sub_entities.append((dim_, e_))\n\n                # Sort for the sake of determinism and by UFC conventions\n                self.sub_entities[dim][e] = sorted(sub_entities)\n\n        # Build connectivity dictionary for easier queries\n        self.connectivity = {}\n        for dim0, sub_entities in self.sub_entities.items():\n\n            # Skip tensor product entities\n            # TODO: Can we do something better?\n            if isinstance(dim0, tuple):\n                continue\n\n            for entity, sub_sub_entities in sorted(sub_entities.items()):\n                for dim1 in range(dim0+1):\n                    d01_entities = filter(lambda x: x[0] == dim1, sub_sub_entities)\n                    d01_entities = tuple(x[1] for x in d01_entities)\n                    self.connectivity.setdefault((dim0, dim1), []).append(d01_entities)\n\n    def _key(self):\n        \"\"\"Hashable object key data (excluding type).\"\"\"\n        # Default: only type matters\n        return None\n\n    def __eq__(self, other):\n        return type(self) == type(other) and self._key() == other._key()\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __hash__(self):\n        return hash((type(self), self._key()))\n\n    def get_shape(self):\n        \"\"\"Returns the code for the element's shape.\"\"\"\n        return self.shape\n\n    def get_vertices(self):\n        \"\"\"Returns an iterable of the element's vertices, each stored as a\n        tuple.\"\"\"\n        return self.vertices\n\n    def get_spatial_dimension(self):\n        \"\"\"Returns the spatial dimension in which the element lives.\"\"\"\n        return len(self.vertices[0])\n\n    def get_topology(self):\n        \"\"\"Returns a dictionary encoding the topology of the element.\n\n        The dictionary's keys are the spatial dimensions (0, 1, ...)\n        and each value is a dictionary mapping.\"\"\"\n        return self.topology\n\n    def get_connectivity(self):\n        \"\"\"Returns a dictionary encoding the connectivity of the element.\n\n        The dictionary's keys are the spatial dimensions pairs ((1, 0),\n        (2, 0), (2, 1), ...) and each value is a list with entities\n        of second dimension ordered by local dim0-dim1 numbering.\"\"\"\n        return self.connectivity\n\n    def get_vertices_of_subcomplex(self, t):\n        \"\"\"Returns the tuple of vertex coordinates associated with the labels\n        contained in the iterable t.\"\"\"\n        return tuple([self.vertices[ti] for ti in t])\n\n    def get_dimension(self):\n        \"\"\"Returns the subelement dimension of the cell.  For tensor\n        product cells, this a tuple of dimensions for each cell in the\n        product.  
For all other cells, this is the same as the spatial\n        dimension.\"\"\"\n        raise NotImplementedError(\"Should be implemented in a subclass.\")\n\n    def construct_subelement(self, dimension):\n        \"\"\"Constructs the reference element of a cell subentity\n        specified by subelement dimension.\n\n        :arg dimension: `tuple` for tensor product cells, `int` otherwise\n        \"\"\"\n        raise NotImplementedError(\"Should be implemented in a subclass.\")\n\n    def get_entity_transform(self, dim, entity_i):\n        \"\"\"Returns a mapping of point coordinates from the\n        `entity_i`-th subentity of dimension `dim` to the cell.\n\n        :arg dim: `tuple` for tensor product cells, `int` otherwise\n        :arg entity_i: entity number (integer)\n        \"\"\"\n        raise NotImplementedError(\"Should be implemented in a subclass.\")\n\n\nclass Simplex(Cell):\n    \"\"\"Abstract class for a reference simplex.\"\"\"\n\n    def compute_normal(self, facet_i):\n        \"\"\"Returns the unit normal vector to facet i of codimension 1.\"\"\"\n        # Interval case\n        if self.get_shape() == LINE:\n            verts = numpy.asarray(self.vertices)\n            v_i, = self.get_topology()[0][facet_i]\n            n = verts[v_i] - verts[[1, 0][v_i]]\n            return n / numpy.linalg.norm(n)\n\n        # first, let's compute the span of the simplex\n        # This is trivial if we have a d-simplex in R^d.\n        # Not so otherwise.\n        vert_vecs = [numpy.array(v)\n                     for v in self.vertices]\n        vert_vecs_foo = numpy.array([vert_vecs[i] - vert_vecs[0]\n                                     for i in range(1, len(vert_vecs))])\n\n        (u, s, vt) = numpy.linalg.svd(vert_vecs_foo)\n        rank = len([si for si in s if si > 1.e-10])\n\n        # this is the set of vectors that span the simplex\n        spanu = u[:, :rank]\n\n        t = self.get_topology()\n        sd = self.get_spatial_dimension()\n        vert_coords_of_facet = \\\n            self.get_vertices_of_subcomplex(t[sd-1][facet_i])\n\n        # now I find everything normal to the facet.\n        vcf = [numpy.array(foo)\n               for foo in vert_coords_of_facet]\n        facet_span = numpy.array([vcf[i] - vcf[0]\n                                  for i in range(1, len(vcf))])\n        (uf, sf, vft) = numpy.linalg.svd(facet_span)\n\n        # now get the null space from vft\n        rankfacet = len([si for si in sf if si > 1.e-10])\n        facet_normal_space = numpy.transpose(vft[rankfacet:, :])\n\n        # now, I have to compute the intersection of\n        # facet_span with facet_normal_space\n        foo = linalg_subspace_intersection(facet_normal_space, spanu)\n\n        num_cols = foo.shape[1]\n\n        if num_cols != 1:\n            raise Exception(\"barf in normal computation\")\n\n        # now need to get the correct sign\n        # get a vector in the direction\n        nfoo = foo[:, 0]\n\n        # what is the vertex not in the facet?\n        verts_set = set(t[sd][0])\n        verts_facet = set(t[sd - 1][facet_i])\n        verts_diff = verts_set.difference(verts_facet)\n        if len(verts_diff) != 1:\n            raise Exception(\"barf in normal computation: getting sign\")\n        vert_off = verts_diff.pop()\n        vert_on = verts_facet.pop()\n\n        # get a vector from the off vertex to the facet\n        v_to_facet = numpy.array(self.vertices[vert_on]) \\\n            - numpy.array(self.vertices[vert_off])\n\n        if 
numpy.dot(v_to_facet, nfoo) > 0.0:\n            return nfoo\n        else:\n            return -nfoo\n\n    def compute_tangents(self, dim, i):\n        \"\"\"computes tangents in any dimension based on differences\n        between vertices and the first vertex of the i:th facet\n        of dimension dim.  Returns a (possibly empty) list.\n        These tangents are *NOT* normalized to have unit length.\"\"\"\n        t = self.get_topology()\n        vs = list(map(numpy.array, self.get_vertices_of_subcomplex(t[dim][i])))\n        ts = [v - vs[0] for v in vs[1:]]\n        return ts\n\n    def compute_normalized_tangents(self, dim, i):\n        \"\"\"computes tangents in any dimension based on differences\n        between vertices and the first vertex of the i:th facet\n        of dimension dim.  Returns a (possibly empty) list.\n        These tangents are normalized to have unit length.\"\"\"\n        ts = self.compute_tangents(dim, i)\n        return [t / numpy.linalg.norm(t) for t in ts]\n\n    def compute_edge_tangent(self, edge_i):\n        \"\"\"Computes the nonnormalized tangent to a 1-dimensional facet.\n        returns a single vector.\"\"\"\n        t = self.get_topology()\n        (v0, v1) = self.get_vertices_of_subcomplex(t[1][edge_i])\n        return numpy.array(v1) - numpy.array(v0)\n\n    def compute_normalized_edge_tangent(self, edge_i):\n        \"\"\"Computes the unit tangent vector to a 1-dimensional facet\"\"\"\n        v = self.compute_edge_tangent(edge_i)\n        return v / numpy.linalg.norm(v)\n\n    def compute_face_tangents(self, face_i):\n        \"\"\"Computes the two tangents to a face.  Only implemented\n        for a tetrahedron.\"\"\"\n        if self.get_spatial_dimension() != 3:\n            raise Exception(\"can't get face tangents yet\")\n        t = self.get_topology()\n        (v0, v1, v2) = list(map(numpy.array,\n                                self.get_vertices_of_subcomplex(t[2][face_i])))\n        return (v1 - v0, v2 - v0)\n\n    def compute_face_edge_tangents(self, dim, entity_id):\n        \"\"\"Computes all the edge tangents of any k-face with k>=1.\n        The result is a array of binom(dim+1,2) vectors.\n        This agrees with `compute_edge_tangent` when dim=1.\n        \"\"\"\n        vert_ids = self.get_topology()[dim][entity_id]\n        vert_coords = [numpy.array(x)\n                       for x in self.get_vertices_of_subcomplex(vert_ids)]\n        edge_ts = []\n        for source in range(dim):\n            for dest in range(source + 1, dim + 1):\n                edge_ts.append(vert_coords[dest] - vert_coords[source])\n        return edge_ts\n\n    def make_points(self, dim, entity_id, order):\n        \"\"\"Constructs a lattice of points on the entity_id:th\n        facet of dimension dim.  
Order indicates how many points to\n        include in each direction.\"\"\"\n        if dim == 0:\n            return (self.get_vertices()[entity_id], )\n        elif 0 < dim < self.get_spatial_dimension():\n            entity_verts = \\\n                self.get_vertices_of_subcomplex(\n                    self.get_topology()[dim][entity_id])\n            return make_lattice(entity_verts, order, 1)\n        elif dim == self.get_spatial_dimension():\n            return make_lattice(self.get_vertices(), order, 1)\n        else:\n            raise ValueError(\"illegal dimension\")\n\n    def volume(self):\n        \"\"\"Computes the volume of the simplex in the appropriate\n        dimensional measure.\"\"\"\n        return volume(self.get_vertices())\n\n    def volume_of_subcomplex(self, dim, facet_no):\n        vids = self.topology[dim][facet_no]\n        return volume(self.get_vertices_of_subcomplex(vids))\n\n    def compute_scaled_normal(self, facet_i):\n        \"\"\"Returns the unit normal to facet_i of scaled by the\n        volume of that facet.\"\"\"\n        dim = self.get_spatial_dimension()\n        v = self.volume_of_subcomplex(dim - 1, facet_i)\n        return self.compute_normal(facet_i) * v\n\n    def compute_reference_normal(self, facet_dim, facet_i):\n        \"\"\"Returns the unit normal in infinity norm to facet_i.\"\"\"\n        assert facet_dim == self.get_spatial_dimension() - 1\n        n = Simplex.compute_normal(self, facet_i)  # skip UFC overrides\n        return n / numpy.linalg.norm(n, numpy.inf)\n\n    def get_entity_transform(self, dim, entity):\n        \"\"\"Returns a mapping of point coordinates from the\n        `entity`-th subentity of dimension `dim` to the cell.\n\n        :arg dim: subentity dimension (integer)\n        :arg entity: entity number (integer)\n        \"\"\"\n        topology = self.get_topology()\n        celldim = self.get_spatial_dimension()\n        codim = celldim - dim\n        if dim == 0:\n            # Special case vertices.\n            i, = topology[dim][entity]\n            vertex = self.get_vertices()[i]\n            return lambda point: vertex\n        elif dim == celldim:\n            assert entity == 0\n            return lambda point: point\n\n        try:\n            subcell = self.construct_subelement(dim)\n        except NotImplementedError:\n            # Special case for 1D elements.\n            x_c, = self.get_vertices_of_subcomplex(topology[0][entity])\n            return lambda x: x_c\n\n        subdim = subcell.get_spatial_dimension()\n\n        assert subdim == celldim - codim\n\n        # Entity vertices in entity space.\n        v_e = numpy.asarray(subcell.get_vertices())\n\n        A = numpy.zeros([subdim, subdim])\n\n        for i in range(subdim):\n            A[i, :] = (v_e[i + 1] - v_e[0])\n            A[i, :] /= A[i, :].dot(A[i, :])\n\n        # Entity vertices in cell space.\n        v_c = numpy.asarray(self.get_vertices_of_subcomplex(topology[dim][entity]))\n\n        B = numpy.zeros([celldim, subdim])\n\n        for j in range(subdim):\n            B[:, j] = (v_c[j + 1] - v_c[0])\n\n        C = B.dot(A)\n\n        offset = v_c[0] - C.dot(v_e[0])\n\n        return lambda x: offset + C.dot(x)\n\n    def get_dimension(self):\n        \"\"\"Returns the subelement dimension of the cell.  
Same as the\n        spatial dimension.\"\"\"\n        return self.get_spatial_dimension()\n\n\n# Backwards compatible name\nReferenceElement = Simplex\n\n\nclass UFCSimplex(Simplex):\n\n    def get_facet_element(self):\n        dimension = self.get_spatial_dimension()\n        return self.construct_subelement(dimension - 1)\n\n    def construct_subelement(self, dimension):\n        \"\"\"Constructs the reference element of a cell subentity\n        specified by subelement dimension.\n\n        :arg dimension: subentity dimension (integer)\n        \"\"\"\n        return ufc_simplex(dimension)\n\n    def contains_point(self, point, epsilon=0):\n        \"\"\"Checks if reference cell contains given point\n        (with numerical tolerance).\"\"\"\n        result = (sum(point) - epsilon <= 1)\n        for c in point:\n            result &= (c + epsilon >= 0)\n        return result\n\n\nclass Point(Simplex):\n    \"\"\"This is the reference point.\"\"\"\n\n    def __init__(self):\n        verts = ((),)\n        topology = {0: {0: (0,)}}\n        super(Point, self).__init__(POINT, verts, topology)\n\n\nclass DefaultLine(Simplex):\n    \"\"\"This is the reference line with vertices (-1.0,) and (1.0,).\"\"\"\n\n    def __init__(self):\n        verts = ((-1.0,), (1.0,))\n        edges = {0: (0, 1)}\n        topology = {0: {0: (0,), 1: (1,)},\n                    1: edges}\n        super(DefaultLine, self).__init__(LINE, verts, topology)\n\n    def get_facet_element(self):\n        raise NotImplementedError()\n\n\nclass UFCInterval(UFCSimplex):\n    \"\"\"This is the reference interval with vertices (0.0,) and (1.0,).\"\"\"\n\n    def __init__(self):\n        verts = ((0.0,), (1.0,))\n        edges = {0: (0, 1)}\n        topology = {0: {0: (0,), 1: (1,)},\n                    1: edges}\n        super(UFCInterval, self).__init__(LINE, verts, topology)\n\n\nclass DefaultTriangle(Simplex):\n    \"\"\"This is the reference triangle with vertices (-1.0,-1.0),\n    (1.0,-1.0), and (-1.0,1.0).\"\"\"\n\n    def __init__(self):\n        verts = ((-1.0, -1.0), (1.0, -1.0), (-1.0, 1.0))\n        edges = {0: (1, 2),\n                 1: (2, 0),\n                 2: (0, 1)}\n        faces = {0: (0, 1, 2)}\n        topology = {0: {0: (0,), 1: (1,), 2: (2,)},\n                    1: edges, 2: faces}\n        super(DefaultTriangle, self).__init__(TRIANGLE, verts, topology)\n\n    def get_facet_element(self):\n        return DefaultLine()\n\n\nclass UFCTriangle(UFCSimplex):\n    \"\"\"This is the reference triangle with vertices (0.0,0.0),\n    (1.0,0.0), and (0.0,1.0).\"\"\"\n\n    def __init__(self):\n        verts = ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))\n        edges = {0: (1, 2), 1: (0, 2), 2: (0, 1)}\n        faces = {0: (0, 1, 2)}\n        topology = {0: {0: (0,), 1: (1,), 2: (2,)},\n                    1: edges, 2: faces}\n        super(UFCTriangle, self).__init__(TRIANGLE, verts, topology)\n\n    def compute_normal(self, i):\n        \"UFC consistent normal\"\n        t = self.compute_tangents(1, i)[0]\n        n = numpy.array((t[1], -t[0]))\n        return n / numpy.linalg.norm(n)\n\n\nclass IntrepidTriangle(Simplex):\n    \"\"\"This is the Intrepid triangle with vertices (0,0),(1,0),(0,1)\"\"\"\n\n    def __init__(self):\n        verts = ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))\n        edges = {0: (0, 1),\n                 1: (1, 2),\n                 2: (2, 0)}\n        faces = {0: (0, 1, 2)}\n        topology = {0: {0: (0,), 1: (1,), 2: (2,)},\n                    1: edges, 2: faces}\n        
super(IntrepidTriangle, self).__init__(TRIANGLE, verts, topology)\n\n    def get_facet_element(self):\n        # I think the UFC interval is equivalent to what the\n        # IntrepidInterval would be.\n        return UFCInterval()\n\n\nclass DefaultTetrahedron(Simplex):\n    \"\"\"This is the reference tetrahedron with vertices (-1,-1,-1),\n    (1,-1,-1),(-1,1,-1), and (-1,-1,1).\"\"\"\n\n    def __init__(self):\n        verts = ((-1.0, -1.0, -1.0), (1.0, -1.0, -1.0),\n                 (-1.0, 1.0, -1.0), (-1.0, -1.0, 1.0))\n        vs = {0: (0, ),\n              1: (1, ),\n              2: (2, ),\n              3: (3, )}\n        edges = {0: (1, 2),\n                 1: (2, 0),\n                 2: (0, 1),\n                 3: (0, 3),\n                 4: (1, 3),\n                 5: (2, 3)}\n        faces = {0: (1, 3, 2),\n                 1: (2, 3, 0),\n                 2: (3, 1, 0),\n                 3: (0, 1, 2)}\n        tets = {0: (0, 1, 2, 3)}\n        topology = {0: vs, 1: edges, 2: faces, 3: tets}\n        super(DefaultTetrahedron, self).__init__(TETRAHEDRON, verts, topology)\n\n    def get_facet_element(self):\n        return DefaultTriangle()\n\n\nclass IntrepidTetrahedron(Simplex):\n    \"\"\"This is the reference tetrahedron with vertices (0,0,0),\n    (1,0,0),(0,1,0), and (0,0,1) used in the Intrepid project.\"\"\"\n\n    def __init__(self):\n        verts = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))\n        vs = {0: (0, ),\n              1: (1, ),\n              2: (2, ),\n              3: (3, )}\n        edges = {0: (0, 1),\n                 1: (1, 2),\n                 2: (2, 0),\n                 3: (0, 3),\n                 4: (1, 3),\n                 5: (2, 3)}\n        faces = {0: (0, 1, 3),\n                 1: (1, 2, 3),\n                 2: (0, 3, 2),\n                 3: (0, 2, 1)}\n        tets = {0: (0, 1, 2, 3)}\n        topology = {0: vs, 1: edges, 2: faces, 3: tets}\n        super(IntrepidTetrahedron, self).__init__(TETRAHEDRON, verts, topology)\n\n    def get_facet_element(self):\n        return IntrepidTriangle()\n\n\nclass UFCTetrahedron(UFCSimplex):\n    \"\"\"This is the reference tetrahedron with vertices (0,0,0),\n    (1,0,0),(0,1,0), and (0,0,1).\"\"\"\n\n    def __init__(self):\n        verts = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))\n        vs = {0: (0, ),\n              1: (1, ),\n              2: (2, ),\n              3: (3, )}\n        edges = {0: (2, 3),\n                 1: (1, 3),\n                 2: (1, 2),\n                 3: (0, 3),\n                 4: (0, 2),\n                 5: (0, 1)}\n        faces = {0: (1, 2, 3),\n                 1: (0, 2, 3),\n                 2: (0, 1, 3),\n                 3: (0, 1, 2)}\n        tets = {0: (0, 1, 2, 3)}\n        topology = {0: vs, 1: edges, 2: faces, 3: tets}\n        super(UFCTetrahedron, self).__init__(TETRAHEDRON, verts, topology)\n\n    def compute_normal(self, i):\n        \"UFC consistent normals.\"\n        t = self.compute_tangents(2, i)\n        n = numpy.cross(t[0], t[1])\n        return -2.0 * n / numpy.linalg.norm(n)\n\n\nclass TensorProductCell(Cell):\n    \"\"\"A cell that is the product of FIAT cells.\"\"\"\n\n    def __init__(self, *cells):\n        # Vertices\n        vertices = tuple(tuple(chain(*coords))\n                         for coords in product(*[cell.get_vertices()\n                                                 for cell in cells]))\n\n        # Topology\n        shape = tuple(len(c.get_vertices()) for c in 
cells)\n        topology = {}\n        for dim in product(*[cell.get_topology().keys()\n                             for cell in cells]):\n            topology[dim] = {}\n            topds = [cell.get_topology()[d]\n                     for cell, d in zip(cells, dim)]\n            for tuple_ei in product(*[sorted(topd)for topd in topds]):\n                tuple_vs = list(product(*[topd[ei]\n                                          for topd, ei in zip(topds, tuple_ei)]))\n                vs = tuple(numpy.ravel_multi_index(numpy.transpose(tuple_vs), shape))\n                topology[dim][tuple_ei] = vs\n            # flatten entity numbers\n            topology[dim] = dict(enumerate(topology[dim][key]\n                                           for key in sorted(topology[dim])))\n\n        super(TensorProductCell, self).__init__(TENSORPRODUCT, vertices, topology)\n        self.cells = tuple(cells)\n\n    def _key(self):\n        return self.cells\n\n    @staticmethod\n    def _split_slices(lengths):\n        n = len(lengths)\n        delimiter = [0] * (n + 1)\n        for i in range(n):\n            delimiter[i + 1] = delimiter[i] + lengths[i]\n        return [slice(delimiter[i], delimiter[i+1])\n                for i in range(n)]\n\n    def get_dimension(self):\n        \"\"\"Returns the subelement dimension of the cell, a tuple of\n        dimensions for each cell in the product.\"\"\"\n        return tuple(c.get_dimension() for c in self.cells)\n\n    def construct_subelement(self, dimension):\n        \"\"\"Constructs the reference element of a cell subentity\n        specified by subelement dimension.\n\n        :arg dimension: dimension in each \"direction\" (tuple)\n        \"\"\"\n        return TensorProductCell(*[c.construct_subelement(d)\n                                   for c, d in zip(self.cells, dimension)])\n\n    def get_entity_transform(self, dim, entity_i):\n        \"\"\"Returns a mapping of point coordinates from the\n        `entity_i`-th subentity of dimension `dim` to the cell.\n\n        :arg dim: subelement dimension (tuple)\n        :arg entity_i: entity number (integer)\n        \"\"\"\n        # unravel entity_i\n        shape = tuple(len(c.get_topology()[d])\n                      for c, d in zip(self.cells, dim))\n        alpha = numpy.unravel_index(entity_i, shape)\n\n        # entity transform on each subcell\n        sct = [c.get_entity_transform(d, i)\n               for c, d, i in zip(self.cells, dim, alpha)]\n\n        slices = TensorProductCell._split_slices(dim)\n\n        def transform(point):\n            return list(chain(*[t(point[s])\n                                for t, s in zip(sct, slices)]))\n        return transform\n\n    def volume(self):\n        \"\"\"Computes the volume in the appropriate dimensional measure.\"\"\"\n        return numpy.prod([c.volume() for c in self.cells])\n\n    def compute_reference_normal(self, facet_dim, facet_i):\n        \"\"\"Returns the unit normal in infinity norm to facet_i of\n        subelement dimension facet_dim.\"\"\"\n        assert len(facet_dim) == len(self.get_dimension())\n        indicator = numpy.array(self.get_dimension()) - numpy.array(facet_dim)\n        (cell_i,), = numpy.nonzero(indicator)\n\n        n = []\n        for i, c in enumerate(self.cells):\n            if cell_i == i:\n                n.extend(c.compute_reference_normal(facet_dim[i], facet_i))\n            else:\n                n.extend([0] * c.get_spatial_dimension())\n        return numpy.asarray(n)\n\n    def 
contains_point(self, point, epsilon=0):\n        \"\"\"Checks if reference cell contains given point\n        (with numerical tolerance).\"\"\"\n        lengths = [c.get_spatial_dimension() for c in self.cells]\n        assert len(point) == sum(lengths)\n        slices = TensorProductCell._split_slices(lengths)\n        return reduce(operator.and_,\n                      (c.contains_point(point[s], epsilon=epsilon)\n                       for c, s in zip(self.cells, slices)),\n                      True)\n\n\nclass UFCQuadrilateral(Cell):\n    \"\"\"This is the reference quadrilateral with vertices\n    (0.0, 0.0), (0.0, 1.0), (1.0, 0.0) and (1.0, 1.0).\"\"\"\n\n    def __init__(self):\n        product = TensorProductCell(UFCInterval(), UFCInterval())\n        pt = product.get_topology()\n\n        verts = product.get_vertices()\n        topology = flatten_entities(pt)\n\n        super(UFCQuadrilateral, self).__init__(QUADRILATERAL, verts, topology)\n\n        self.product = product\n        self.unflattening_map = compute_unflattening_map(pt)\n\n    def get_dimension(self):\n        \"\"\"Returns the subelement dimension of the cell.  Same as the\n        spatial dimension.\"\"\"\n        return self.get_spatial_dimension()\n\n    def construct_subelement(self, dimension):\n        \"\"\"Constructs the reference element of a cell subentity\n        specified by subelement dimension.\n\n        :arg dimension: subentity dimension (integer)\n        \"\"\"\n        if dimension == 2:\n            return self\n        elif dimension == 1:\n            return UFCInterval()\n        elif dimension == 0:\n            return Point()\n        else:\n            raise ValueError(\"Invalid dimension: %d\" % (dimension,))\n\n    def get_entity_transform(self, dim, entity_i):\n        \"\"\"Returns a mapping of point coordinates from the\n        `entity_i`-th subentity of dimension `dim` to the cell.\n\n        :arg dim: entity dimension (integer)\n        :arg entity_i: entity number (integer)\n        \"\"\"\n        d, e = self.unflattening_map[(dim, entity_i)]\n        return self.product.get_entity_transform(d, e)\n\n    def volume(self):\n        \"\"\"Computes the volume in the appropriate dimensional measure.\"\"\"\n        return self.product.volume()\n\n    def compute_reference_normal(self, facet_dim, facet_i):\n        \"\"\"Returns the unit normal in infinity norm to facet_i.\"\"\"\n        assert facet_dim == 1\n        d, i = self.unflattening_map[(facet_dim, facet_i)]\n        return self.product.compute_reference_normal(d, i)\n\n    def contains_point(self, point, epsilon=0):\n        \"\"\"Checks if reference cell contains given point\n        (with numerical tolerance).\"\"\"\n        return self.product.contains_point(point, epsilon=epsilon)\n\n\nclass UFCHexahedron(Cell):\n    \"\"\"This is the reference hexahedron with vertices\n    (0.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, 1.0, 0.0), (0.0, 1.0, 1.0),\n    (1.0, 0.0, 0.0), (1.0, 0.0, 1.0), (1.0, 1.0, 0.0) and (1.0, 1.0, 1.0).\"\"\"\n\n    def __init__(self):\n        product = TensorProductCell(UFCInterval(), UFCInterval(), UFCInterval())\n        pt = product.get_topology()\n\n        verts = product.get_vertices()\n        topology = flatten_entities(pt)\n\n        super(UFCHexahedron, self).__init__(HEXAHEDRON, verts, topology)\n\n        self.product = product\n        self.unflattening_map = compute_unflattening_map(pt)\n\n    def get_dimension(self):\n        \"\"\"Returns the subelement dimension of the cell.  
Same as the\n        spatial dimension.\"\"\"\n        return self.get_spatial_dimension()\n\n    def construct_subelement(self, dimension):\n        \"\"\"Constructs the reference element of a cell subentity\n        specified by subelement dimension.\n\n        :arg dimension: subentity dimension (integer)\n        \"\"\"\n        if dimension == 3:\n            return self\n        elif dimension == 2:\n            return UFCQuadrilateral()\n        elif dimension == 1:\n            return UFCInterval()\n        elif dimension == 0:\n            return Point()\n        else:\n            raise ValueError(\"Invalid dimension: %d\" % (dimension,))\n\n    def get_entity_transform(self, dim, entity_i):\n        \"\"\"Returns a mapping of point coordinates from the\n        `entity_i`-th subentity of dimension `dim` to the cell.\n\n        :arg dim: entity dimension (integer)\n        :arg entity_i: entity number (integer)\n        \"\"\"\n        d, e = self.unflattening_map[(dim, entity_i)]\n        return self.product.get_entity_transform(d, e)\n\n    def volume(self):\n        \"\"\"Computes the volume in the appropriate dimensional measure.\"\"\"\n        return self.product.volume()\n\n    def compute_reference_normal(self, facet_dim, facet_i):\n        \"\"\"Returns the unit normal in infinity norm to facet_i.\"\"\"\n        assert facet_dim == 2\n        d, i = self.unflattening_map[(facet_dim, facet_i)]\n        return self.product.compute_reference_normal(d, i)\n\n    def contains_point(self, point, epsilon=0):\n        \"\"\"Checks if reference cell contains given point\n        (with numerical tolerance).\"\"\"\n        return self.product.contains_point(point, epsilon=epsilon)\n\n\ndef make_affine_mapping(xs, ys):\n    \"\"\"Constructs (A,b) such that x --> A * x + b is the affine\n    mapping from the simplex defined by xs to the simplex defined by ys.\"\"\"\n\n    dim_x = len(xs[0])\n    dim_y = len(ys[0])\n\n    if len(xs) != len(ys):\n        raise Exception(\"\")\n\n    # find A in R^{dim_y,dim_x}, b in R^{dim_y} such that\n    # A xs[i] + b = ys[i] for all i\n\n    mat = numpy.zeros((dim_x * dim_y + dim_y, dim_x * dim_y + dim_y), \"d\")\n    rhs = numpy.zeros((dim_x * dim_y + dim_y,), \"d\")\n\n    # loop over points\n    for i in range(len(xs)):\n        # loop over components of each A * point + b\n        for j in range(dim_y):\n            row_cur = i * dim_y + j\n            col_start = dim_x * j\n            col_finish = col_start + dim_x\n            mat[row_cur, col_start:col_finish] = numpy.array(xs[i])\n            rhs[row_cur] = ys[i][j]\n            # need to get terms related to b\n            mat[row_cur, dim_y * dim_x + j] = 1.0\n\n    sol = numpy.linalg.solve(mat, rhs)\n\n    A = numpy.reshape(sol[:dim_x * dim_y], (dim_y, dim_x))\n    b = sol[dim_x * dim_y:]\n\n    return A, b\n\n\ndef default_simplex(spatial_dim):\n    \"\"\"Factory function that maps spatial dimension to an instance of\n    the default reference simplex of that dimension.\"\"\"\n    if spatial_dim == 1:\n        return DefaultLine()\n    elif spatial_dim == 2:\n        return DefaultTriangle()\n    elif spatial_dim == 3:\n        return DefaultTetrahedron()\n    else:\n        raise RuntimeError(\"Can't create default simplex of dimension %s.\" % str(spatial_dim))\n\n\ndef ufc_simplex(spatial_dim):\n    \"\"\"Factory function that maps spatial dimension to an instance of\n    the UFC reference simplex of that dimension.\"\"\"\n    if spatial_dim == 0:\n        return Point()\n    elif 
spatial_dim == 1:\n        return UFCInterval()\n    elif spatial_dim == 2:\n        return UFCTriangle()\n    elif spatial_dim == 3:\n        return UFCTetrahedron()\n    else:\n        raise RuntimeError(\"Can't create UFC simplex of dimension %s.\" % str(spatial_dim))\n\n\ndef ufc_cell(cell):\n    \"\"\"Handle incoming calls from FFC.\"\"\"\n\n    # celltype could be a string or a cell.\n    if isinstance(cell, str):\n        celltype = cell\n    else:\n        celltype = cell.cellname()\n\n    if \" * \" in celltype:\n        # Tensor product cell\n        return TensorProductCell(*map(ufc_cell, celltype.split(\" * \")))\n    elif celltype == \"quadrilateral\":\n        return UFCQuadrilateral()\n    elif celltype == \"hexahedron\":\n        return UFCHexahedron()\n    elif celltype == \"interval\":\n        return ufc_simplex(1)\n    elif celltype == \"triangle\":\n        return ufc_simplex(2)\n    elif celltype == \"tetrahedron\":\n        return ufc_simplex(3)\n    else:\n        raise RuntimeError(\"Don't know how to create UFC cell of type %s\" % str(celltype))\n\n\ndef volume(verts):\n    \"\"\"Constructs the volume of the simplex spanned by verts\"\"\"\n\n    # use fact that volume of UFC reference element is 1/n!\n    sd = len(verts) - 1\n    ufcel = ufc_simplex(sd)\n    ufcverts = ufcel.get_vertices()\n\n    A, b = make_affine_mapping(ufcverts, verts)\n\n    # can't just take determinant since, e.g. the face of\n    # a tet being mapped to a 2d triangle doesn't have a\n    # square matrix\n\n    (u, s, vt) = numpy.linalg.svd(A)\n\n    # this is the determinant of the \"square part\" of the matrix\n    # (ie the part that maps the restriction of the higher-dimensional\n    # stuff to UFC element\n    p = numpy.prod([si for si in s if (si) > 1.e-10])\n\n    return p / factorial(sd)\n\n\ndef tuple_sum(tree):\n    \"\"\"\n    This function calculates the sum of elements in a tuple, it is needed to handle nested tuples in TensorProductCell.\n    Example: tuple_sum(((1, 0), 1)) returns 2\n    If input argument is not the tuple, returns input.\n    \"\"\"\n    if isinstance(tree, tuple):\n        return sum(map(tuple_sum, tree))\n    else:\n        return tree\n\n\ndef flatten_entities(topology_dict):\n    \"\"\"This function flattens topology dict of TensorProductCell and entity_dofs dict of TensorProductElement\"\"\"\n\n    flattened_entities = defaultdict(list)\n    for dim in sorted(topology_dict.keys()):\n        flat_dim = tuple_sum(dim)\n        flattened_entities[flat_dim] += [v for k, v in sorted(topology_dict[dim].items())]\n\n    return {dim: dict(enumerate(entities))\n            for dim, entities in flattened_entities.items()}\n\n\ndef compute_unflattening_map(topology_dict):\n    \"\"\"This function returns unflattening map for the given tensor product topology dict.\"\"\"\n\n    counter = defaultdict(count)\n    unflattening_map = {}\n\n    for dim, entities in sorted(topology_dict.items()):\n        flat_dim = tuple_sum(dim)\n        for entity in entities:\n            flat_entity = next(counter[flat_dim])\n            unflattening_map[(flat_dim, flat_entity)] = (dim, entity)\n\n    return unflattening_map\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/reference_element.py"}, {"coverage": [null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, null, null, 1, null, 1, 1, 1, 0, null, null, null, 1, null, null, 1, null, null, 1, null, 1, 1, 1, null, 1, 1, 1, 1, null, 1, 1, 1, null, 1, 
null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, 1, null, null, 1, null, null, null, null, 1, 1, null, 1, null, 1, null, 1, null, 1], "source": "# -*- coding: utf-8 -*-\n\"\"\"Implementation of the generalized Regge finite elements.\"\"\"\n\n# Copyright (C) 2015-2018 Lizao Li\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. If not, see <http://www.gnu.org/licenses/>.\nfrom FIAT.finite_element import CiarletElement\nfrom FIAT.dual_set import DualSet\nfrom FIAT.polynomial_set import ONSymTensorPolynomialSet\nfrom FIAT.functional import PointwiseInnerProductEvaluation as InnerProduct\n\n\nclass ReggeDual(DualSet):\n    \"\"\"Degrees of freedom for generalized Regge finite elements.\"\"\"\n    def __init__(self, cell, degree):\n        dim = cell.get_spatial_dimension()\n        if (dim < 2) or (dim > 3):\n            raise ValueError(\"Generalized Regge elements are implemented only \"\n                             \"for dimension 2--3. For 1D, it is just DG(r).\")\n\n        # construct the degrees of freedoms\n        dofs = []               # list of functionals\n        # dof_ids[i][j] contains the indices of dofs that are associated with\n        # entity j in dim i\n        dof_ids = {}\n\n        # no vertex dof\n        dof_ids[0] = {i: [] for i in range(dim + 1)}\n        # edge dofs\n        (_dofs, _dof_ids) = self._generate_dofs(cell, 1, degree, 0)\n        dofs.extend(_dofs)\n        dof_ids[1] = _dof_ids\n        # facet dofs for 3D\n        if dim == 3:\n            (_dofs, _dof_ids) = self._generate_dofs(cell, 2, degree, len(dofs))\n            dofs.extend(_dofs)\n            dof_ids[2] = _dof_ids\n        # cell dofs\n        (_dofs, _dof_ids) = self._generate_dofs(cell, dim, degree, len(dofs))\n        dofs.extend(_dofs)\n        dof_ids[dim] = _dof_ids\n\n        super(ReggeDual, self).__init__(dofs, cell, dof_ids)\n\n    @staticmethod\n    def _generate_dofs(cell, entity_dim, degree, offset):\n        \"\"\"generate degrees of freedom for enetities of dimension entity_dim\n\n        Input: all obvious except\n           offset  -- the current first available dof id.\n\n        Output:\n           dofs    -- an array of dofs associated to entities in that dim\n           dof_ids -- a dict mapping entity_id to the range of indices of dofs\n                      associated to it.\n\n        On a k-face for degree r, the dofs are given by the value of\n           t^T u t\n        evaluated at points enough to control P(r-k+1) for all the edge\n        tangents of the face.\n        `cell.make_points(entity_dim, entity_id, degree + 2)` happens to\n        generate exactly those points needed.\n        \"\"\"\n        dofs = []\n        dof_ids = {}\n        num_entities = len(cell.get_topology()[entity_dim])\n        for entity_id in range(num_entities):\n            pts = 
cell.make_points(entity_dim, entity_id, degree + 2)\n            tangents = cell.compute_face_edge_tangents(entity_dim, entity_id)\n            dofs += [InnerProduct(cell, t, t, pt)\n                     for pt in pts\n                     for t in tangents]\n            num_new_dofs = len(pts) * len(tangents)\n            dof_ids[entity_id] = list(range(offset, offset + num_new_dofs))\n            offset += num_new_dofs\n        return (dofs, dof_ids)\n\n\nclass Regge(CiarletElement):\n    \"\"\"The generalized Regge elements for symmetric-matrix-valued functions.\n       REG(r) in dimension n is the space of polynomial symmetric-matrix-valued\n       functions of degree r or less with tangential-tangential continuity.\n    \"\"\"\n    def __init__(self, cell, degree):\n        assert degree >= 0, \"Regge start at degree 0!\"\n        # shape functions\n        Ps = ONSymTensorPolynomialSet(cell, degree)\n        # degrees of freedom\n        Ls = ReggeDual(cell, degree)\n        # mapping under affine transformation\n        mapping = \"double covariant piola\"\n\n        super(Regge, self).__init__(Ps, Ls, degree, mapping=mapping)\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/regge.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, 1, null, null, 1, null, null, 1, 0, null, 1, 1, null, 1, 0, null, 1, 0, null, 1, 1, null, null, 1, null, null, 1, null, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, 1, null, null, 1, null, null, 1, null, null, 1, 1, 1, null, null, 1, null, null, 1, null, 1, null, null, 1, 1, 1, 0, 1, 0, 1, 1, null, 0, null, 1, null, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, null, 1, 1, 1, 1], "source": "# Copyright (C) 2015-2016 Jan Blechta, Andrew T T McRae, and others\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom FIAT.dual_set import DualSet\nfrom FIAT.finite_element import CiarletElement\n\n\nclass RestrictedElement(CiarletElement):\n    \"\"\"Restrict given element to specified list of dofs.\"\"\"\n\n    def __init__(self, element, indices=None, restriction_domain=None):\n        '''For sake of argument, indices overrides restriction_domain'''\n\n        if not (indices or restriction_domain):\n            raise RuntimeError(\"Either indices or restriction_domain must be passed in\")\n\n        if not indices:\n            indices = _get_indices(element, restriction_domain)\n\n        if isinstance(indices, str):\n            raise RuntimeError(\"variable 'indices' was a string; did you forget to use a keyword?\")\n\n        if len(indices) == 0:\n            raise ValueError(\"No point in creating empty RestrictedElement.\")\n\n        self._element = element\n        self._indices = indices\n\n        # Fetch reference element\n        ref_el = element.get_reference_element()\n\n        # Restrict primal set\n        poly_set = element.get_nodal_basis().take(indices)\n\n        # Restrict dual set\n        dof_counter = 0\n        entity_ids = {}\n        nodes = []\n        nodes_old = element.dual_basis()\n        for d, entities in element.entity_dofs().items():\n            entity_ids[d] = {}\n            for entity, dofs in entities.items():\n                entity_ids[d][entity] = []\n                for dof in dofs:\n                    if dof not in indices:\n                        continue\n                    entity_ids[d][entity].append(dof_counter)\n                    dof_counter += 1\n                    nodes.append(nodes_old[dof])\n        assert dof_counter == len(indices)\n        dual = DualSet(nodes, ref_el, entity_ids)\n\n        # Restrict mapping\n        mapping_old = element.mapping()\n        mapping_new = [mapping_old[dof] for dof in indices]\n        assert all(e_mapping == mapping_new[0] for e_mapping in mapping_new)\n\n        # Call constructor of CiarletElement\n        super(RestrictedElement, self).__init__(poly_set, dual, 0, element.get_formdegree(), mapping_new[0])\n\n\ndef sorted_by_key(mapping):\n    \"Sort dict items by key, allowing different key types.\"\n    # Python3 doesn't allow comparing builtins of different type, therefore the typename trick here\n    def _key(x):\n        return (type(x[0]).__name__, x[0])\n    return sorted(mapping.items(), key=_key)\n\n\ndef _get_indices(element, restriction_domain):\n    \"Restriction domain can be 'interior', 'vertex', 'edge', 'face' or 'facet'\"\n\n    if restriction_domain == \"interior\":\n        # Return dofs from interior\n        return element.entity_dofs()[max(element.entity_dofs().keys())][0]\n\n    # otherwise return dofs with d <= dim\n    if restriction_domain == \"vertex\":\n        dim = 0\n    elif restriction_domain == \"edge\":\n        dim = 1\n    elif restriction_domain == \"face\":\n        dim = 2\n    elif restriction_domain == \"facet\":\n        dim = element.get_reference_element().get_spatial_dimension() - 1\n    else:\n        raise RuntimeError(\"Invalid restriction domain\")\n\n    is_prodcell = isinstance(max(element.entity_dofs().keys()), tuple)\n\n    entity_dofs = element.entity_dofs()\n    indices = []\n    for d in range(dim + 1):\n        if is_prodcell:\n            for a in range(d + 1):\n                b = d - a\n                try:\n                    entities = entity_dofs[(a, b)]\n                    for 
(entity, index) in sorted_by_key(entities):\n                        indices += index\n                except KeyError:\n                    pass\n        else:\n            entities = entity_dofs[d]\n            for (entity, index) in sorted_by_key(entities):\n                indices += index\n    return indices\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/restricted.py"}, {"coverage": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, 1, null, null, 1, 1, null, null, 1, null, null, null, 1, null, 1, 1, 0, null, 1, null, null, 1, null, null, 1, 1, 1, 0, 1, 1, null, 0, null, null, 1, 1, 1, 1, null, 1, 1, 1, 1, 1, 1, 1, null, null, 1, null, null, 1, 1, null, null, null, null, null, null, null, 1, 1, 1, 1, 1, null, null, null, null, null, null, 1, 0, null, 0, null, 0, null, 0, null, null, 0, null, 1, 1, 1, null, null, null, null, 1, null, 0, null, null, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, 1, 1, null, null, null, null, null, null, 1, 1, null, 0, null, 1, 1, 1, null, null, 1, 0, 1, 1, 1, null, null, 1, 1, null, 0, null, 1, 0, 0, null, null, null, 0, 0, null, 0, null, 1, 0, 0, null, 0, 0, 0, 0, 0, null, 0, 0, null, 0, null, 1, 0, 0, null, 0, 0, 0, 0, 0, null, 0, 0, null, 0, null, 1, null, 1, 1, null, 0, null, 1, null, 1, null, 1, 1, null, null, 1, null, 1, null, 1, null, 1, null, null, 1, null, 1, null, null, 0, null, 1, null, null, 1, 1, 1, null, 1, null, 1, null, null, 1, 1, 1, null, 1, null, 1, 1, null, 1, 1, null, null, null, 1, 1, 1, null, null, null, null, null, null, null, null, null, 1, 1, 1, 0, 1, 1, 1, 1, 1, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, 1, 1, null, null, null, null, null, null, null, null, null, null, null, null, 1, null, null, null, 1, 1, null, null, null, null, null, null, 1, 0, null, null, null, null, null, null, 0, null, null, null, 0, 0, null, null, null, 0, 1, null, 1, null, 1, 1, 0, 0, 0, 0, null, 0, null, 1, null, null, 0, null, 1, null, 0, null, 1, null, 0, null, null, 1, null, null, null, null, null, null, 1, null, 1, 1, null, 1, 1, 1, 1, null, 0, null, 1, null, 1, 1, 1, 1, null, null, 1, null, 1, null, 1, null, 1, null, null, 1, 1, null, null, null, 1, 1, null, 1, null, 1, null, 1, null, 1, null, null, 1, null, 1, null, null, 0, null, 1, null, null, 0, null, 1, null, 0, null, 1, null, 0], "source": "# Copyright (C) 2008 Robert C. Kirby (Texas Tech University)\n# Copyright (C) 2013 Andrew T. T. McRae\n# Modified by Thomas H. Gibson, 2016\n#\n# This file is part of FIAT.\n#\n# FIAT is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FIAT is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with FIAT. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport numpy\nfrom FIAT.finite_element import FiniteElement\nfrom FIAT.reference_element import TensorProductCell, UFCQuadrilateral, UFCHexahedron, flatten_entities, compute_unflattening_map\nfrom FIAT.dual_set import DualSet\nfrom FIAT.polynomial_set import mis\nfrom FIAT import dual_set\nfrom FIAT import functional\n\n\ndef _first_point(node):\n    return tuple(node.get_point_dict().keys())[0]\n\n\ndef _first_point_pair(node):\n    return tuple(node.get_point_dict().items())[0]\n\n\nclass TensorProductElement(FiniteElement):\n    \"\"\"Class implementing a finite element that is the tensor product\n    of two existing finite elements.\"\"\"\n\n    def __init__(self, A, B):\n        # set up simple things\n        order = min(A.get_order(), B.get_order())\n        if A.get_formdegree() is None or B.get_formdegree() is None:\n            formdegree = None\n        else:\n            formdegree = A.get_formdegree() + B.get_formdegree()\n\n        # set up reference element\n        ref_el = TensorProductCell(A.get_reference_element(),\n                                   B.get_reference_element())\n\n        if A.mapping()[0] != \"affine\" and B.mapping()[0] == \"affine\":\n            mapping = A.mapping()[0]\n        elif B.mapping()[0] != \"affine\" and A.mapping()[0] == \"affine\":\n            mapping = B.mapping()[0]\n        elif A.mapping()[0] == \"affine\" and B.mapping()[0] == \"affine\":\n            mapping = \"affine\"\n        else:\n            raise ValueError(\"check tensor product mappings - at least one must be affine\")\n\n        # set up entity_ids\n        Adofs = A.entity_dofs()\n        Bdofs = B.entity_dofs()\n        Bsdim = B.space_dimension()\n        entity_ids = {}\n\n        for curAdim in Adofs:\n            for curBdim in Bdofs:\n                entity_ids[(curAdim, curBdim)] = {}\n                dim_cur = 0\n                for entityA in Adofs[curAdim]:\n                    for entityB in Bdofs[curBdim]:\n                        entity_ids[(curAdim, curBdim)][dim_cur] = \\\n                            [x*Bsdim + y for x in Adofs[curAdim][entityA]\n                                for y in Bdofs[curBdim][entityB]]\n                        dim_cur += 1\n\n        # set up dual basis\n        Anodes = A.dual_basis()\n        Bnodes = B.dual_basis()\n\n        # build the dual set by inspecting the current dual\n        # sets item by item.\n        # Currently supported cases:\n        # PointEval x PointEval = PointEval [scalar x scalar = scalar]\n        # PointScaledNormalEval x PointEval = PointScaledNormalEval [vector x scalar = vector]\n        # ComponentPointEvaluation x PointEval [vector x scalar = vector]\n        nodes = []\n        for Anode in Anodes:\n            if isinstance(Anode, functional.PointEvaluation):\n                for Bnode in Bnodes:\n                    if isinstance(Bnode, functional.PointEvaluation):\n                        # case: PointEval x PointEval\n                        # the PointEval functional just requires the\n                        # coordinates. these are currently stored as\n                        # the key of a one-item dictionary. 
we retrieve\n                        # these by calling get_point_dict(), and\n                        # use the concatenation to make a new PointEval\n                        nodes.append(functional.PointEvaluation(ref_el, _first_point(Anode) + _first_point(Bnode)))\n                    elif isinstance(Bnode, functional.IntegralMoment):\n                        # dummy functional for product with integral moments\n                        nodes.append(functional.Functional(None, None, None,\n                                                           {}, \"Undefined\"))\n                    elif isinstance(Bnode, functional.PointDerivative):\n                        # dummy functional for product with point derivative\n                        nodes.append(functional.Functional(None, None, None,\n                                                           {}, \"Undefined\"))\n                    else:\n                        raise NotImplementedError(\"unsupported functional type\")\n\n            elif isinstance(Anode, functional.PointScaledNormalEvaluation):\n                for Bnode in Bnodes:\n                    if isinstance(Bnode, functional.PointEvaluation):\n                        # case: PointScaledNormalEval x PointEval\n                        # this could be wrong if the second shape\n                        # has spatial dimension >1, since we are not\n                        # explicitly scaling by facet size\n                        if len(_first_point(Bnode)) > 1:\n                            # TODO: support this case one day\n                            raise NotImplementedError(\"PointScaledNormalEval x PointEval is not yet supported if the second shape has dimension > 1\")\n                        # We cannot make a new functional.PSNEval in\n                        # the natural way, since it tries to compute\n                        # the normal vector by itself.\n                        # Instead, we create things manually, and\n                        # call Functional() with these arguments\n                        sd = ref_el.get_spatial_dimension()\n                        # The pt_dict is a one-item dictionary containing\n                        # the details of the functional.\n                        # The key is the spatial coordinate, which\n                        # is just a concatenation of the two parts.\n                        # The value is a list of tuples, representing\n                        # the normal vector (scaled by the volume of\n                        # the facet) at that point.\n                        # Each tuple looks like (foo, (i,)); the i'th\n                        # component of the scaled normal is foo.\n\n                        # The following line is only valid when the second\n                        # shape has spatial dimension 1 (enforced above)\n                        Apoint, Avalue = _first_point_pair(Anode)\n                        pt_dict = {Apoint + _first_point(Bnode): Avalue + [(0.0, (len(Apoint),))]}\n\n                        # The following line should be used in the\n                        # general case\n                        # pt_dict = {Anode.get_point_dict().keys()[0] + Bnode.get_point_dict().keys()[0]: Anode.get_point_dict().values()[0] + [(0.0, (ii,)) for ii in range(len(Anode.get_point_dict().keys()[0]), len(Anode.get_point_dict().keys()[0]) + len(Bnode.get_point_dict().keys()[0]))]}\n\n                        # THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED\n                        shp = (sd,)\n     
                   nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, \"PointScaledNormalEval\"))\n                    else:\n                        raise NotImplementedError(\"unsupported functional type\")\n\n            elif isinstance(Anode, functional.PointEdgeTangentEvaluation):\n                for Bnode in Bnodes:\n                    if isinstance(Bnode, functional.PointEvaluation):\n                        # case: PointEdgeTangentEval x PointEval\n                        # this is very similar to the case above, so comments omitted\n                        if len(_first_point(Bnode)) > 1:\n                            raise NotImplementedError(\"PointEdgeTangentEval x PointEval is not yet supported if the second shape has dimension > 1\")\n                        sd = ref_el.get_spatial_dimension()\n                        Apoint, Avalue = _first_point_pair(Anode)\n                        pt_dict = {Apoint + _first_point(Bnode): Avalue + [(0.0, (len(Apoint),))]}\n\n                        # THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED\n                        shp = (sd,)\n                        nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, \"PointEdgeTangent\"))\n                    else:\n                        raise NotImplementedError(\"unsupported functional type\")\n\n            elif isinstance(Anode, functional.ComponentPointEvaluation):\n                for Bnode in Bnodes:\n                    if isinstance(Bnode, functional.PointEvaluation):\n                        # case: ComponentPointEval x PointEval\n                        # the CptPointEval functional requires the component\n                        # and the coordinates. very similar to PE x PE case.\n                        sd = ref_el.get_spatial_dimension()\n                        nodes.append(functional.ComponentPointEvaluation(ref_el, Anode.comp, (sd,), _first_point(Anode) + _first_point(Bnode)))\n                    else:\n                        raise NotImplementedError(\"unsupported functional type\")\n\n            elif isinstance(Anode, functional.FrobeniusIntegralMoment):\n                for Bnode in Bnodes:\n                    if isinstance(Bnode, functional.PointEvaluation):\n                        # case: FroIntMom x PointEval\n                        sd = ref_el.get_spatial_dimension()\n                        pt_dict = {}\n                        pt_old = Anode.get_point_dict()\n                        for pt in pt_old:\n                            pt_dict[pt+_first_point(Bnode)] = pt_old[pt] + [(0.0, sd-1)]\n                        # THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED\n                        shp = (sd,)\n                        nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, \"FrobeniusIntegralMoment\"))\n                    else:\n                        raise NotImplementedError(\"unsupported functional type\")\n\n            elif isinstance(Anode, functional.IntegralMoment):\n                for Bnode in Bnodes:\n                    if isinstance(Bnode, functional.PointEvaluation):\n                        # case: IntMom x PointEval\n                        sd = ref_el.get_spatial_dimension()\n                        pt_dict = {}\n                        pt_old = Anode.get_point_dict()\n                        for pt in pt_old:\n                            pt_dict[pt+_first_point(Bnode)] = pt_old[pt]\n                        # THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED\n                        shp = (sd,)\n                        
nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, \"IntegralMoment\"))\n                    else:\n                        raise NotImplementedError(\"unsupported functional type\")\n\n            elif isinstance(Anode, functional.Functional):\n                # this should catch everything else\n                for Bnode in Bnodes:\n                    nodes.append(functional.Functional(None, None, None, {}, \"Undefined\"))\n            else:\n                raise NotImplementedError(\"unsupported functional type\")\n\n        dual = dual_set.DualSet(nodes, ref_el, entity_ids)\n\n        super(TensorProductElement, self).__init__(ref_el, dual, order, formdegree, mapping)\n        # Set up constituent elements\n        self.A = A\n        self.B = B\n\n        # degree for quadrature rule\n        self.polydegree = max(A.degree(), B.degree())\n\n    def degree(self):\n        \"\"\"Return the degree of the (embedding) polynomial space.\"\"\"\n        return self.polydegree\n\n    def get_nodal_basis(self):\n        \"\"\"Return the nodal basis, encoded as a PolynomialSet object,\n        for the finite element.\"\"\"\n        raise NotImplementedError(\"get_nodal_basis not implemented\")\n\n    def get_coeffs(self):\n        \"\"\"Return the expansion coefficients for the basis of the\n        finite element.\"\"\"\n        raise NotImplementedError(\"get_coeffs not implemented\")\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Return tabulated values of derivatives up to given order of\n        basis functions at given points.\"\"\"\n        if entity is None:\n            entity = (self.ref_el.get_dimension(), 0)\n        entity_dim, entity_id = entity\n\n        shape = tuple(len(c.get_topology()[d])\n                      for c, d in zip(self.ref_el.cells, entity_dim))\n        idA, idB = numpy.unravel_index(entity_id, shape)\n\n        # Factor the entity argument to get entities of the component elements\n        entityA_dim, entityB_dim = entity_dim\n        entityA = (entityA_dim, idA)\n        entityB = (entityB_dim, idB)\n\n        pointsAdim, pointsBdim = [c.get_spatial_dimension()\n                                  for c in self.ref_el.construct_subelement(entity_dim).cells]\n        pointsA = [point[:pointsAdim] for point in points]\n        pointsB = [point[pointsAdim:pointsAdim + pointsBdim] for point in points]\n\n        Asdim = self.A.ref_el.get_spatial_dimension()\n        Bsdim = self.B.ref_el.get_spatial_dimension()\n        # Note that for entities other than cells, the following\n        # tabulations are already appropriately zero-padded so no\n        # additional zero padding is required.\n        Atab = self.A.tabulate(order, pointsA, entityA)\n        Btab = self.B.tabulate(order, pointsB, entityB)\n        npoints = len(points)\n\n        # allow 2 scalar-valued FE spaces, or 1 scalar-valued,\n        # 1 vector-valued. 
Combining 2 vector-valued spaces\n        # into a tensor-valued space via an outer-product\n        # seems to be a sensible general option, but I don't\n        # know how to handle the nestedness of the arrays\n        # if someone then tries to make a new \"tensor finite\n        # element\" where one component is already a\n        # tensor-valued space!\n        A_valuedim = len(self.A.value_shape())  # scalar: 0, vector: 1\n        B_valuedim = len(self.B.value_shape())  # scalar: 0, vector: 1\n        if A_valuedim + B_valuedim > 1:\n            raise NotImplementedError(\"tabulate does not support two vector-valued inputs\")\n        result = {}\n        for i in range(order + 1):\n            alphas = mis(Asdim+Bsdim, i)  # thanks, Rob!\n            for alpha in alphas:\n                if A_valuedim == 0 and B_valuedim == 0:\n                    # for each point, get outer product of (A's basis\n                    # functions f1, f2, ... evaluated at that point)\n                    # with (B's basis functions g1, g2, ... evaluated\n                    # at that point). This gives temp[point][f_i][g_j].\n                    # Flatten this, so bfs are\n                    # in the order f1g1, f1g2, ..., f2g1, f2g2, ...\n                    # which is compatible with the entity_dofs order.\n                    # We now have temp[point][full basis function]\n                    # Transpose this to get temp[bf][point],\n                    # and we are done.\n                    temp = numpy.array([numpy.outer(\n                                       Atab[alpha[0:Asdim]][..., j],\n                                       Btab[alpha[Asdim:Asdim+Bsdim]][..., j])\n                        .ravel() for j in range(npoints)])\n                    result[alpha] = temp.transpose()\n                elif A_valuedim == 1 and B_valuedim == 0:\n                    # similar to above, except A's basis functions\n                    # are now vector-valued. numpy.outer flattens the\n                    # array, so it's like taking the OP of\n                    # f1_x, f1_y, f2_x, f2_y, ... with g1, g2, ...\n                    # this gives us\n                    # temp[point][f1x, f1y, f2x, f2y, ...][g_j].\n                    # reshape once to get temp[point][f_i][x/y][g_j]\n                    # transpose to get temp[point][x/y][f_i][g_j]\n                    # reshape to flatten the last two indices, this\n                    # gives us temp[point][x/y][full bf_i]\n                    # finally, transpose the first and last indices\n                    # to get temp[bf_i][x/y][point], and we are done.\n                    temp = numpy.array([numpy.outer(\n                                       Atab[alpha[0:Asdim]][..., j],\n                                       Btab[alpha[Asdim:Asdim+Bsdim]][..., j])\n                        for j in range(npoints)])\n                    assert temp.shape[1] % 2 == 0\n                    temp2 = temp.reshape((temp.shape[0],\n                                          temp.shape[1]//2,\n                                          2,\n                                          temp.shape[2]))\\\n                        .transpose(0, 2, 1, 3)\\\n                        .reshape((temp.shape[0], 2, -1))\\\n                        .transpose(2, 1, 0)\n                    result[alpha] = temp2\n                elif A_valuedim == 0 and B_valuedim == 1:\n                    # as above, with B's functions now vector-valued.\n                    # we now do... 
[numpy.outer ... for ...] gives\n                    # temp[point][f_i][g1x,g1y,g2x,g2y,...].\n                    # reshape to temp[point][f_i][g_j][x/y]\n                    # flatten middle: temp[point][full bf_i][x/y]\n                    # transpose to temp[bf_i][x/y][point]\n                    temp = numpy.array([numpy.outer(\n                        Atab[alpha[0:Asdim]][..., j],\n                        Btab[alpha[Asdim:Asdim+Bsdim]][..., j])\n                        for j in range(len(Atab[alpha[0:Asdim]][0]))])\n                    assert temp.shape[2] % 2 == 0\n                    temp2 = temp.reshape((temp.shape[0], temp.shape[1],\n                                          temp.shape[2]//2, 2))\\\n                        .reshape((temp.shape[0], -1, 2))\\\n                        .transpose(1, 2, 0)\n                    result[alpha] = temp2\n        return result\n\n    def value_shape(self):\n        \"\"\"Return the value shape of the finite element functions.\"\"\"\n        if len(self.A.value_shape()) == 0 and len(self.B.value_shape()) == 0:\n            return ()\n        elif len(self.A.value_shape()) == 1 and len(self.B.value_shape()) == 0:\n            return (self.A.value_shape()[0],)\n        elif len(self.A.value_shape()) == 0 and len(self.B.value_shape()) == 1:\n            return (self.B.value_shape()[0],)\n        else:\n            raise NotImplementedError(\"value_shape not implemented\")\n\n    def dmats(self):\n        \"\"\"Return dmats: expansion coefficients for basis function\n        derivatives.\"\"\"\n        raise NotImplementedError(\"dmats not implemented\")\n\n    def get_num_members(self, arg):\n        \"\"\"Return number of members of the expansion set.\"\"\"\n        raise NotImplementedError(\"get_num_members not implemented\")\n\n    def is_nodal(self):\n        # This element is nodal iff all factor elements are nodal.\n        return all([self.A.is_nodal(), self.B.is_nodal()])\n\n\nclass FlattenedDimensions(FiniteElement):\n    \"\"\"A wrapper class that flattens entity dimensions of a FIAT element defined\n    on a TensorProductCell to one with quadrilateral/hexahedron entities.\n    TensorProductCell has dimension defined as a tuple of factor element dimensions\n    (i, j) in 2D  and (i, j, k) in 3D.\n    Flattened dimension is a sum of the tuple elements.\"\"\"\n\n    def __init__(self, element):\n\n        nodes = element.dual.nodes\n        dim = element.ref_el.get_spatial_dimension()\n\n        if dim == 2:\n            ref_el = UFCQuadrilateral()\n        elif dim == 3:\n            ref_el = UFCHexahedron()\n        else:\n            raise ValueError(\"Illegal element dimension %s\" % dim)\n\n        entity_ids = element.dual.entity_ids\n\n        flat_entity_ids = flatten_entities(entity_ids)\n        dual = DualSet(nodes, ref_el, flat_entity_ids)\n        super(FlattenedDimensions, self).__init__(ref_el, dual, element.get_order(), element.get_formdegree(), element._mapping)\n        self.element = element\n\n        # Construct unflattening map for passing correct values to tabulate()\n        self.unflattening_map = compute_unflattening_map(self.element.ref_el.get_topology())\n\n    def degree(self):\n        \"\"\"Return the degree of the (embedding) polynomial space.\"\"\"\n        return self.element.degree()\n\n    def tabulate(self, order, points, entity=None):\n        \"\"\"Return tabulated values of derivatives up to given order of\n        basis functions at given points.\"\"\"\n        if entity is None:\n            
entity = (self.get_reference_element().get_spatial_dimension(), 0)\n\n        # Entity is provided in flattened form (d, i)\n        # Appropriate product entity is taken from the unflattening_map dict\n        entity_dim, entity_id = entity\n        product_entity = self.unflattening_map[(entity_dim, entity_id)]\n\n        return self.element.tabulate(order, points, product_entity)\n\n    def value_shape(self):\n        \"\"\"Return the value shape of the finite element functions.\"\"\"\n        return self.element.value_shape()\n\n    def get_nodal_basis(self):\n        \"\"\"Return the nodal basis, encoded as a PolynomialSet object,\n        for the finite element.\"\"\"\n        raise self.element.get_nodal_basis()\n\n    def get_coeffs(self):\n        \"\"\"Return the expansion coefficients for the basis of the\n        finite element.\"\"\"\n        raise self.element.get_coeffs()\n\n    def dmats(self):\n        \"\"\"Return dmats: expansion coefficients for basis function\n        derivatives.\"\"\"\n        raise self.element.dmats()\n\n    def get_num_members(self, arg):\n        \"\"\"Return number of members of the expansion set.\"\"\"\n        raise self.element.get_num_members(arg)\n\n    def is_nodal(self):\n        # This element is nodal iff unflattened element is nodal.\n        return self.element.is_nodal()\n", "name": "/home/fenics/local/lib/python3.5/site-packages/FIAT/tensor_product.py"}], "git": {"head": {"committer_email": "chris@bpi.cam.ac.uk", "author_email": "chris@bpi.cam.ac.uk", "author_name": "Chris Richardson", "committer_name": "Chris Richardson", "id": "5c15f5cc8ddad0f76f13a7d910df7f71c44b4d8b", "message": "Merge branch 'chris/simplify-ref-el' into next"}, "branch": "next", "remotes": [{"url": "file:///home/bamboo/bamboo-home/xml-data/build-dir/_git-repositories-cache/851965aef3a42120bf8b908ed018deb045dd8cde", "name": "origin"}]}, "service_name": "coveralls-python", "config_file": ".coveragerc", "repo_token": "[secure]"}
==
Reporting 40 files
==

/home/fenics/local/lib/python3.5/site-packages/FIAT/P0.py - 25/55
/home/fenics/local/lib/python3.5/site-packages/FIAT/__init__.py - 38/76
/home/fenics/local/lib/python3.5/site-packages/FIAT/argyris.py - 79/156
/home/fenics/local/lib/python3.5/site-packages/FIAT/bell.py - 6/87
/home/fenics/local/lib/python3.5/site-packages/FIAT/brezzi_douglas_fortin_marini.py - 59/117
/home/fenics/local/lib/python3.5/site-packages/FIAT/brezzi_douglas_marini.py - 48/97
/home/fenics/local/lib/python3.5/site-packages/FIAT/bubble.py - 17/51
/home/fenics/local/lib/python3.5/site-packages/FIAT/crouzeix_raviart.py - 25/81
/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous.py - 30/93
/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous_lagrange.py - 28/67
/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous_raviart_thomas.py - 36/80
/home/fenics/local/lib/python3.5/site-packages/FIAT/discontinuous_taylor.py - 28/67
/home/fenics/local/lib/python3.5/site-packages/FIAT/dual_set.py - 33/66
/home/fenics/local/lib/python3.5/site-packages/FIAT/enriched.py - 47/140
/home/fenics/local/lib/python3.5/site-packages/FIAT/expansions.py - 222/411
/home/fenics/local/lib/python3.5/site-packages/FIAT/finite_element.py - 97/255
/home/fenics/local/lib/python3.5/site-packages/FIAT/functional.py - 190/483
/home/fenics/local/lib/python3.5/site-packages/FIAT/gauss_legendre.py - 15/44
/home/fenics/local/lib/python3.5/site-packages/FIAT/gauss_lobatto_legendre.py - 15/44
/home/fenics/local/lib/python3.5/site-packages/FIAT/hdiv_trace.py - 147/413
/home/fenics/local/lib/python3.5/site-packages/FIAT/hdivcurl.py - 104/265
/home/fenics/local/lib/python3.5/site-packages/FIAT/hellan_herrmann_johnson.py - 50/108
/home/fenics/local/lib/python3.5/site-packages/FIAT/hermite.py - 41/87
/home/fenics/local/lib/python3.5/site-packages/FIAT/jacobi.py - 51/111
/home/fenics/local/lib/python3.5/site-packages/FIAT/lagrange.py - 23/57
/home/fenics/local/lib/python3.5/site-packages/FIAT/mixed.py - 33/120
/home/fenics/local/lib/python3.5/site-packages/FIAT/morley.py - 29/68
/home/fenics/local/lib/python3.5/site-packages/FIAT/nedelec.py - 161/307
/home/fenics/local/lib/python3.5/site-packages/FIAT/nedelec_second_kind.py - 90/240
/home/fenics/local/lib/python3.5/site-packages/FIAT/nodal_enriched.py - 64/141
/home/fenics/local/lib/python3.5/site-packages/FIAT/orthopoly.py - 49/382
/home/fenics/local/lib/python3.5/site-packages/FIAT/polynomial_set.py - 133/287
/home/fenics/local/lib/python3.5/site-packages/FIAT/quadrature.py - 153/320
/home/fenics/local/lib/python3.5/site-packages/FIAT/quadrature_element.py - 24/76
/home/fenics/local/lib/python3.5/site-packages/FIAT/quadrature_schemes.py - 101/299
/home/fenics/local/lib/python3.5/site-packages/FIAT/raviart_thomas.py - 68/151
/home/fenics/local/lib/python3.5/site-packages/FIAT/reference_element.py - 409/1052
/home/fenics/local/lib/python3.5/site-packages/FIAT/regge.py - 42/106
/home/fenics/local/lib/python3.5/site-packages/FIAT/restricted.py - 55/121
/home/fenics/local/lib/python3.5/site-packages/FIAT/tensor_product.py - 145/445
Coverage submitted!
Job ##91.1
https://coveralls.io/jobs/38537114
{'url': 'https://coveralls.io/jobs/38537114', 'message': 'Job ##91.1'}
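For a quick aggregate figure, the covered/total pairs in the per-file report above can be summed. A minimal sketch, assuming those report lines have been captured in a plain text file (hypothetically report.txt):

    import re

    covered = total = 0
    # Hypothetical file holding the "path - covered/total" lines from the report above.
    with open("report.txt") as f:
        for line in f:
            m = re.search(r" - (\d+)/(\d+)\s*$", line)
            if m:
                covered += int(m.group(1))
                total += int(m.group(2))

    print("overall: %d/%d lines (%.1f%%)" % (covered, total, 100.0 * covered / total))

Note that the denominators in this report appear to count every source line, executable or not, so the resulting percentage is likely a lower bound on the statement coverage that coveralls.io itself displays for the job.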