From 831604566a4aa1e4251b84562ffea4716214fe4d Mon Sep 17 00:00:00 2001
From: Tim Daly
Date: Sun, 2 Jul 2017 21:59:07 -0400
Subject: [PATCH] \index{Wolfram, Stephen} \begin{chunk}{axiom.bib}
@book{Wolf91, author = "Wolfram, Stephen", title =
"Mathematica: A System for Doing Mathematics by Computer",
publisher = "Addison-Wesley", isbn = "978-0201515022",
year = "1991" }
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
\end{chunk}
\index{Char, Bruce}
\index{Geddes, Keith O.}
\index{Gonnet, Gaston H.}
\index{Leong, Benton}
\index{Monagan, Michael B.}
\index{Watt, Stephen M.}
\begin{chunk}{axiom.bib}
@book{Char91,
author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
title = "Maple V Language Reference Manual",
publisher = "Springer",
year = "1991",
isbn = "978-0-387-94124-0"
}
\end{chunk}
\index{Jenks, Richard D.}
\begin{chunk}{axiom.bib}
@techreport{Jenk70,
author = "Jenks, Richard D.",
title = "META/LISP: An interactive translator writing system",
type = "Research Report",
number = "RC 2968",
institution = "IBM Research",
year = "1970",
keywords = "axiomref"
}
\end{chunk}
\index{Smolka, Gert}
\begin{chunk}{axiom.bib}
@article{Smol88,
author = "Smolka, Gert",
title = "Logic Programming with Polymorphically Order-Sorted Types",
journal = "Lecture Notes in Computer Science",
volume = "343",
pages = "53-70",
year = "1988",
abstract =
"This paper presents the foundations for relational logic programming
with polymorphically order-sorted data types. This type discipline
combines the notion of parametric polymorphism [Milner 78], which has
been developed for higher-order functional programming [Harper et al. 86],
with the notion of order-sorted typing [Goguen 78, Smolka et al. 87],
which has been developed for equational first-order specification
and programming [Futatsugi et al. 85]. Both notions are important
for practical reasons. With parametric polymorphism one avoids the
need for redefining lists and other parametric data types for every
type they are used with. Subsorts not only provide for more natural
type specifications, but also yield more computational power:
variables can be constrained to sorts rather than to single values and
typed unification computes directly with sort constraints, thus
reducing the need for expensive backtracking.",
paper = "Smol88.pdf"
}
\end{chunk}
\index{Fruehwirth, Thom}
\index{Shapiro, Ehud}
\index{Vardi, Moshe Y.}
\index{Yardeni, Eyal}
\begin{chunk}{axiom.bib}
@inproceedings{Frue91,
author = "Fruehwirth, Thom and Shapiro, Ehud and Vardi, Moshe Y. and
Yardeni, Eyal",
title = "Logic programs as types for logic programs",
booktitle = "Proc. Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
publisher = "IEEE",
pages = "300-309",
year = "1991",
abstract =
"Type checking can be extremely useful to the program development process.
Of particular interest are descriptive type systems, which let the
programmer write programs without having to define or mention types.
We consider here optimistic type systems for logic programs. In such
systems types are conservative approximations to the success set of the
program predicates. We propose the use of logic programs to describe
types. We argue that this approach unifies the denotational and
operational approaches to descriptive type systems and is simpler
and more natural than previous approaches. We focus on the use of
unary-predicate programs to describe types. We identify a proper class
of unary-predicate programs and show that it is expressive enough to
express several notions of types. We use an analogy with 2-way automata
and a correspondence with alternating algorithms to obtain a complexity
characterization of type inference and type checking. This
characterization was facilitated by the use of logic programs to
represent types.",
paper = "Frue91.pdf"
}
\end{chunk}
\index{Kifer, Michael}
\index{Wu, James}
\begin{chunk}{axiom.bib}
@inproceedings{Kife91,
author = "Kifer, Michael and Wu, James",
title = "A First-order Theory of Types and Polymorphism in Logic
Programming",
booktitle = "Proc Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
year = "1991",
pages = "310-321",
abstract =
"A logic called typed predicate calculus (TPC) that gives declarative
meaning to logic programs with type declarations and type inference is
introduced. The proper interaction between parametric and inclusion
varieties of polymorphism is achieved through a construct called type
dependency, which is analogous to implication types but yields more
natural and succinct specifications. Unlike other proposals where
typing has extra-logical status, in TPC the notion of type-correctness
has precise model-theoretic meaning that is independent of any
specific type-checking or type-inference procedure. Moreover, many
different approaches to typing that were proposed in the past can be
studied and compared within the framework of TPC. Another novel
feature of TPC is its reflexivity with respect to type declarations;
in TPC, these declarations can be queried the same way as any other
data. Type reflexivity is useful for browsing knowledge bases and,
potentially, for debugging logic programs.",
paper = "Kife91.pdf"
}
\end{chunk}
\index{Butler, Greg}
\index{Cannon, John}
\begin{chunk}{axiom.bib}
@inproceedings{Butl90,
author = "Butler, Greg and Cannon, John",
title = "The Design of Cayley -- A Language for Modern Algebra",
booktitle = "DISCO 1990",
year = "1990",
pages = "10-19",
abstract =
"Established practice in the domain of modern algebra has shaped the
design of Cayley. The design has also been responsive to the needs of
its users. The requirements of the users include consistency with
common mathematical notation; appropriate data types such as sets,
sequences, mappings, algebraic structures and elements; efficiency;
extensibility; power of in-built functions and procedures for known
algorithms; and access to common examples of algebraic structures. We
discuss these influences on the design of Cayley's user language.",
paper = "Butl90.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Kaliszyk, Cezary}
\index{Wiedijk, Freek}
\begin{chunk}{axiom.bib}
@article{Kali07,
author = "Kaliszyk, Cezary and Wiedijk, Freek",
title = "Certified Computer Algebra on Top of an Interactive Theorem
Prover",
journal = "LNAI",
volume = "4573",
pages = "94-105",
year = "2007",
abstract =
"We present a prototype of a computer algebra system that is built on
top of a proof assistant, HOL Light. This architecture guarantees
that one can be certain that the system will make no mistakes. All
expressions in the system will have precise semantics, and the proof
assistant will check the correctness of all simplifications according
to this semantics. The system actually {\sl proves} each simplification
performed by the computer algebra system.
Although our system is built on top of a proof assistant, we designed
the user interface to be very close in spirit to the interface of
systems like Maple and Mathematica. The system, therefore, allows the
user to easily probe the underlying automation of the proof assistant
for strengths and weaknesses with respect to the automation of
mainstream computer algebra systems. The system that we present is a
prototype, but can be straightforwardly scaled up to a practical
computer algebra system",
paper = "Kali07.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Chapman, Peter}
\index{McKinna, James}
\index{Urban, Christian}
\begin{chunk}{axiom.bib}
@article{Chap08,
author = "Chapman, Peter and McKinna, James and Urban, Christian",
title = "Mechanising a Proof of Craig's Interpolation Theorem for
Intuitionistic Logic in Nominal Isabelle",
journal = "LNAI",
volume = "5144",
year = "2008",
pages = "38-52",
abstract =
"Craig's Interpolation Theorem is an important meta-theoretical result
for several logics. Here we describe a formalisation of the result for
first-order intuitionistic logic without function symbols or equality,
with the intention of giving insight into how other such results in
proof theory might be mechanically verified, notably cut-admissibility.
We use the package {\sl Nominal Isabelle}, which easily deals with the
binding issues in the quantifier cases of the proof",
}
\end{chunk}
\index{Lobachev, Oleg}
\index{Loogen, Rita}
\begin{chunk}{axiom.bib}
@article{Loba08,
author = "Lobachev, Oleg and Loogen, Rita",
title = "Towards an Implementation of a Computer Algebra System in a
Functional Language",
journal = "LNAI",
year = "2008",
pages = "141-154",
abstract =
"This paper discusses the pros and cons of using a functional language
for implementing a computer algebra system. The contributions of the
paper are twofold. Firstly, we discuss some language-centered design
aspects of a computer algebra system -- the ``language unity''
concept. Secondly, we provide an implementation of a fast polynomial
multiplication algorithm, which is one of the core elements of a
computer algebra system. The goal of the paper is to test the
feasibility of an implementation of (some elements of) a computer
algebra system in a modern functional language.",
paper = "Loba08.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Butler, Greg}
\begin{chunk}{axiom.bib}
@article{Butl96,
author = "Butler, Greg",
title = "Software Architectures for Computer Algebra: A Case Study",
journal = "DISCO 96",
year = "1996",
pages = "277-285",
abstract =
"The architectures of the existing computer algebra systems have not
been discussed sufficiently in the literature. Instead, the focus
has been on the design of the related programming language, or the
design of a few key data structures.
We address this deficiency with a case study of the architecture of
CAYLEY. Our aim is twofold: to capture this knowledge before the total
passing of a system now made obsolete by MAGMA; and to encourage others
to describe the architecture of the computer algebra systems with which
they are familiar.
The long-term goal is a better understanding of how to construct
computer algebra systems in the future.",
paper = "Butl96.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Mosses, Peter D.}
\begin{chunk}{axiom.bib}
@article{Moss93,
author = "Mosses, Peter D.",
title = "The Use of Sorts in Algebraic Specifications",
journal = "Recent Trends in Data Type Specification",
year = "1993",
pages = "66-91",
abstract =
"Algebraic specification frameworks exploit a variety of sort
disciplines. The treatment of sorts has a considerable influence on
the ease with which such features as partiality and polymorphism can be
specified. This survey gives an accessible overview of various
frameworks, focusing on their sort disciplines and assessing their
strengths and weaknesses for practical applications. Familiarity with
the basic notions of algebraic specification is assumed.",
paper = "Moss93.pdf"
}
\end{chunk}
\index{Kredel, Heinz}
\begin{chunk}{axiom.bib}
@article{Kred08,
author = "Kredel, Heinz",
title = "Unique Factorization Domains in the Java Computer Algebra System",
journal = "Automated Deduction in Geometry",
year = "2008",
pages = "86-115",
abstract =
"This paper describes the implementation of recursive algorithms in
unique factorization domains, namely multivariate polynomial greatest
common divisors (gcd) and factorization into irreducible parts in the
Java computer algebra library (JAS). The implementation of gcds,
resultants and factorization is part of the essential building blocks
for any computation in algebraic geometry, in particular in automated
deduction in geometry. There are various implementations of these
algorithms in procedural programming languages. Our aim is an
implementation in a modern object oriented programming language with
generic data types, as it is provided by Java programming language. We
exemplify that the type design and implementation of JAS is suitable
for the implementation of several greatest common divisor algorithms
and factorization of multivariate polynomials. Due to the design we
can employ this package in very general settings not commonly seen in
other computer algebra systems. As for example, in the coefficient
arithmetic for advanced Gröbner basis computations like in polynomial
rings over rational function fields or (finite, commutative) regular
rings. The new package provides factory methods for the selection of
one of the several implementations for non experts. Further we
introduce a parallel proxy for gcd implementations which runs
different implementations concurrently.",
paper = "Kred08.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Harrison, John}
\index{Thery, Laurent}
\begin{chunk}{axiom.bib}
@article{Harr94,
author = "Harrison, John and Thery, Laurent",
title = "Extending the HOL theorem prover with a computer algebra
system to reason about the reals",
journal = "Lecture Notes in Computer Science",
volume = "780",
pages = "174-184",
year = "1994",
abstract =
"In this paper we describe an environment for reasoning about the reals
which combines the rigour of a theorem prover with the power of a
computer algebra system.",
paper = "Harr94.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Medina-Bulo, I.}
\index{Palomo-Lozano, F.}
\index{Alonso-Jimenez, J.A.}
\index{Ruiz-Reina, J.L.}
\begin{chunk}{axiom.bib}
@inproceedings{Medi04,
author = "Medina-Bulo, I. and Palomo-Lozano, F. and Alonso-Jimenez, J.A.
and Ruiz-Reina, J.L.",
title = "Verified Computer Algebra in ACL2",
booktitle = "ISSAC 04, LNCS Volume 3249",
year = "2004",
pages = "171-184",
abstract =
"In this paper, we present the formal verification of a COMMON LISP
implementation of Buchberger's algorithm for computing Groebner bases
of polynomial ideals. This work is carried out in the ACL2 system and
shows how verified Computer Algebra can be achieved in an executable
logic.",
paper = "Medi04.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@inproceedings{Dave08,
author = "Davenport, James H.",
title = "Effective Set Membership in Computer Algebra and Beyond",
booktitle = "Int. Conf. on Intelligent Computer Mathematics",
pages = "266-269",
year = "2008",
journal = "LNCS",
volume = "5144",
abstract =
"In previous work we showed the importance of distinguishing ``I know
$X\ne Y$'' from ``I don't know that $X=Y$''. In this paper we look at
effective set membership, starting with Groebner bases, where the
issues are well-expressed in algebra systems, and going on to integration
and other questions of 'computer calculus'.
In particular, we claim that a better recognition of the role of set
membership would clarify some features of computer algebra systems,
such as 'what does an integral mean as output'.",
paper = "Dave08.pdf"
}
\end{chunk}
\index{Ueberberg, Johannes}
\begin{chunk}{axiom.bib}
@inproceedings{Uebe94,
author = "Ueberberg, Johannes",
title = "Interactive theorem proving and computer algebra",
booktitle = "AISMC 94",
year = "1994",
pages = "1-9",
paper = "Uebe94.pdf"
}
\end{chunk}
\index{Jolly, Raphael}
\begin{chunk}{axiom.bib}
@inproceedings{Joll13,
author = "Jolly, Raphael",
title = "Categories as Type Classes in the Scala Algebra System",
booktitle = "CASC 2013",
year = "2013",
pages = "209-218",
journal = "LNCS",
volume = "8136",
abstract =
"A characterization of the categorical view of computer algebra is
proposed. Some requirements on the ability for abstraction that
programming languages must have in order to allow a categorical
approach is given. Object-oriented inheritance is presented as a
suitable abstraction scheme and exemplified by the Java Algebra
System. Type classes are then introduced as an alternate abstraction
scheme and shown to be eventually better suited for modeling
categories. Pro and cons of the two approaches are discussed and a
hybrid solution is exhibited.",
paper = "Joll13.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Aladjev, Victor}
\begin{chunk}{axiom.bib}
@inproceedings{Alad03,
author = "Aladjev, Victor",
title = "Computer Algebra System Maple: A New Software Library",
booktitle = "ICCS 2003",
year = "2003",
pages = "711-717",
journal = "LNCS",
volume = "2657",
abstract =
"The paper represents Maple library containing more than 400
procedures expanding possibilities of the Maple package of releases
6,7 and 8. The library is structurally organized similarly to the main
Maple library. The process of the library installing is simple enough
as a result of which the above library will be logically linked with
the main Maple library, supporting access to software located in it
equally with standard Maple software. The demo library is delivered
free of charge at request to addresses mentioned above.",
paper = "Alad03.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Jackson, Paul}
\begin{chunk}{axiom.bib}
@inproceedings{Jack94,
author = "Jackson, Paul",
title = "Exploring abstract algebra in constructive type theory",
booktitle = "CADE 1994",
year = "1994",
pages = "590-604",
journal = "LNCS",
volume = "814",
abstract =
"I describe my implementation of computational abstract algebra in the
Nuprl system. I focus on my development of multivariate polynomials. I
show how I use Nuprl's expressive type theory to define classes of
free abelian monoids and free monoid algebras. These classes are
combined to create a class of all implementations of polynomials. I
discuss the issues of subtyping and computational content that came up
in designing the class definitions. I give examples of relevant theory
developments, tactics and proofs. I consider how Nuprl could act as an
algebraic ‘oracle’ for a computer algebra system and the relevance of
this work for abstract functional programming.",
paper = "Jack94.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{van Hulzen, J. A.}
\begin{chunk}{axiom.bib}
@inproceedings{Hulz82,
author = "van Hulzen, J. A.",
title = "Computer algebra systems viewed by a notorious user",
booktitle = "EUROCAM 1982",
pages = "166-180",
year = "1982",
journal = "LNCS",
volume = "144",
abstract =
"Are design and use of computer algebra systems disjoint or
complementary activities? Raising and answering this question are
equally controversial, since a clear distinction between languages
features and library facilities is hard to make. Instead of even
attempting to answer this rather academic question it is argued why it
is reasonable to raise related questions: Is SMP a paradox? Is it
realistic to neglect inaccurate input data? Is a very high level
programming language instrumental for equal opportunity employment in
scientific research?",
paper = "Hulz82.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Caviness, B.F.}
\begin{chunk}{axiom.bib}
@inproceedings{Cavi85,
author = "Caviness, B.F.",
title = "Computer Algebra: Past and Future",
booktitle = "EUROCAL 1985",
year = "1985",
pages = "1-18",
journal = "LNCS",
volume = "203",
abstract =
"The preceding just touches on some of the highlights of the
accomplishments and unsolved problems in computer algebra. A really
comprehensive survey would be much too long for the space available
here. I close with the following quote, which has been attributed to
Albert Einstein and helps, perhaps, to keep a proper prospective on
our work: 'The symbolic representation of abstract entities is doomed
to its rightful place of relative insignificance in a world in which
flowers and beautiful women abound.'",
paper = "Cavi85.pdf"
}
\end{chunk}
\index{Schreiner, Wolfgang}
\index{Danielczyk-Landerl, Werner}
\index{Marin, Mircea}
\index{Stocher, Wolfgang}
\begin{chunk}{axiom.bib}
@inproceedings{Schr00,
author = "Schreiner, Wolfgang and Danielczyk-Landerl, Werner and
Marin, Mircea and Stocher, Wolfgang",
title = "A Generic Programming Environment for High-Performance
Mathematical Libraries",
booktitle = "Generic Programming",
year = "2000",
pages = "256-267",
journal = "LNCS",
volume = "1766",
abstract =
"We report on a programming environment for the development of generic
mathematical libraries based on functors (parameterized modules) that
have rigorously specified but very abstract interfaces. We focus on
the combination of the functor-based programming style with software
engineering principles in large development projects. The generated
target code is highly efficient and can be easily embedded into
foreign application environments.",
paper = "Schr00.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Magaud, Nicolas}
\index{Narboux, Julien}
\index{Schreck, Pascal}
\begin{chunk}{axiom.bib}
@inproceedings{Maga08,
author = "Magaud, Nicolas and Narboux, Julien and Schreck, Pascal",
title = "Formalizing Projective Plane Geometry in Coq",
booktitle = "Automated Deduction in Geometry",
year = "2008",
pages = "141-162",
journal = "LNCS",
volume = "6301",
abstract =
"We investigate how projective plane geometry can be formalized in a
proof assistant such as Coq. Such a formalization increases the
reliability of textbook proofs whose details and particular cases are
often overlooked and left to the reader as exercises. Projective plane
geometry is described through two different axiom systems which are
formally proved equivalent. Usual properties such as decidability of
equality of points (and lines) are then proved in a constructive
way. The duality principle as well as formal models of projective
plane geometry are then studied and implemented in Coq. Finally, we
formally prove in Coq that Desargues’ property is independent of the
axioms of projective plane geometry.",
paper = "Maga08.pdf"
}
\end{chunk}
\index{Fevre, Stephane}
\index{Wang, Dongming}
\begin{chunk}{axiom.bib}
@inproceedings{Fevr98,
author = "Fevre, Stephane and Wang, Dongming",
title = "Proving Geometric Theorems using Clifford Algebra and
Rewrite Rules",
booktitle = "Conf. on Automated Deduction",
year = "1998",
pages = "17-32",
journal = "LNCS",
volume = "1421",
abstract =
"We consider geometric theorems that can be stated constructively by
introducing points, while each newly introduced point may be
represented in terms of the previously constructed points using
Clifford algebraic operators. To prove a concrete theorem, one first
substitutes the expressions of the dependent points into the
conclusion Clifford polynomial to obtain an expression that involves
only the free points and parameters. A term-rewriting system is
developed that can simplify such an expression to 0, and thus prove
the theorem. A large class of theorems can be proved effectively in
this coordinate-free manner. This paper describes the method in detail
and reports on our preliminary experiments.",
paper = "Fevr98.pdf"
}
\end{chunk}
\index{Kerber, Manfred}
\index{Kohlhase, Michael}
\index{Sorge, Volker}
\begin{chunk}{axiom.bib}
@article{Kerb98,
author = "Kerber, Manfred and Kohlhase, Michael and Sorge, Volker",
title = "Integrating Computer Algebra with Proof Planning",
journal = "Journal of Automated Reasoning",
volume = "21",
number = "3",
pages = "327-355",
year = "1998",
abstract =
"Mechanized reasoning systems and computer algebra systems have
different objectives. Their integration is highly desirable, since
formal proofs often involve both of the two different tasks proving
and calculating. Even more important, proof and computation are often
interwoven and not easily separable.
In this article we advocate an integration of computer algebra into
mechanized reasoning systems at the proof plan level. This approach
allows us to view the computer algebra algorithms as methods, that is,
declarative representations of the problem-solving knowledge specific
to a certain mathematical domain. Automation can be achieved in many
cases by searching for a hierarchic proof plan at the method level by
using suitable domain-specific control knowledge about the
mathematical algorithms. In other words, the uniform framework of
proof planning allows us to solve a large class of problems that are
not automatically solvable by separate systems.
Our approach also gives an answer to the correctness problems inherent
in such an integration. We advocate an approach where the computer
algebra system produces high-level protocol information that can be
processed by an interface to derive proof plans. Such a proof plan in
turn can be expanded to proofs at different levels of abstraction, so
the approach is well suited for producing a high-level verbalized
explication as well as for a low-level, machine-checkable,
calculus-level proof.
We present an implementation of our ideas and exemplify them using an
automatically solved example.",
paper = "Kerb98.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Loos, Ruediger G. K.}
\begin{chunk}{axiom.bib}
@article{Loos74,
author = "Loos, Ruediger G. K.",
title = "Toward a Formal Implementation of Computer Algebra",
journal = "SIGSAM",
volume = "8",
number = "3",
pages = "9-16",
year = "1974",
abstract =
"We consider in this paper the task of synthesizing an algebraic
system. Today the task is significantly simpler than in the pioneer
days of symbol manipulation, mainly because of the work done by the
pioneers in our area, but also because of the progress in other areas
of Computer Science. There is now a considerable collection of
algebraic algorithms at hand and a much better understanding of data
structures and programming constructs than only a few years ago.",
paper = "Loos74.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Gries, David}
\begin{chunk}{axiom.bib}
@book{Grie78,
author = "Gries, David",
title = "Programming Methodology",
publisher = "Springer-Verlag",
year = "1978"
}
\end{chunk}
\index{Wirsing, Martin}
\begin{chunk}{axiom.bib}
@InCollection{Wirs91,
author = "Wirsing, Martin",
title = "Algebraic Specification",
booktitle = "Handbook of Theoretical Computer Science (Vol B)",
publisher = "MIT Press",
year = "1991",
pages = "675-788",
chapter = "13",
isbn = "0-444-88074-7"
}
\end{chunk}
\index{Jones, Simon Peyton}
\begin{chunk}{axiom.bib}
@techreport{Jone98,
author = "Jones, Simon Peyton",
title = "The Haskell 98 Language and Libraries. The Revised Report",
institution = "Cambridge University Press",
year = "1998",
type = "technical report",
link = "\url{https://www.haskell.org/definition/haskell98-report.pdf}"
}
\end{chunk}
\index{Aho, Alfred V.}
\index{Sethi, Ravi}
\index{Ullman, Jeffrey D.}
\begin{chunk}{axiom.bib}
@book{Ahox86,
author = "Aho, Alfred V. and Sethi, Ravi and Ullman, Jeffrey D.",
title = "Compilers: Principles, Techniques, and Tools",
year = "1986",
publisher = "Addison-Wesley",
isbn = "978-0201100884"
}
\end{chunk}
\index{Mitchell, John C.}
\begin{chunk}{axiom.bib}
@InCollection{Mitc91,
author = "Mitchell, John C.",
title = "Type Systems for Programming Languages",
booktitle = "Handbook of Theoretical Computer Science (Vol B.)",
pages = "365-458",
year = "1991",
publisher = "MIT Press",
isbn = "0-444-88074-7"
}
\end{chunk}
\index{Strachey, Christopher}
\begin{chunk}{axiom.bib}
@article{Stra00,
author = "Strachey, Christopher",
title = "Fundamental Concepts in Programming Languages",
journal = "Higher-Order and Symbolic Computation",
volume = "13",
number = "1-2",
pages = "11-49",
year = "2000",
abstract =
"This paper forms the substance of a course of lectures given at the
International Summer School in Computer Programming at Copenhagen in
August, 1967. The lectures were originally given from notes and the
paper was written after the course was finished. In spite of this, and
only partly because of the shortage of time, the paper still retains
many of the shortcomings of a lecture course. The chief of these are
an uncertainty of aim—it is never quite clear what sort of audience
there will be for such lectures—and an associated switching from
formal to informal modes of presentation which may well be less
acceptable in print than it is natural in the lecture room. For these
(and other) faults, I apologise to the reader.
There are numerous references throughout the course to CPL [1–3]. This
is a programming language which has been under development since 1962
at Cambridge and London and Oxford. It has served as a vehicle for
research into both programming languages and the design of
compilers. Partial implementations exist at Cambridge and London. The
language is still evolving so that there is no definitive manual
available yet. We hope to reach another resting point in its evolution
quite soon and to produce a compiler and reference manuals for this
version. The compiler will probably be written in such a way that it
is relatively easyto transfer it to another machine, and in the first
instance we hope to establish it on three or four machines more or
less at the same time.
The lack of a precise formulation for CPL should not cause much
difficulty in this course, as we are primarily concerned with the
ideas and concepts involved rather than with their precise
representation in a programming language.",
paper = "Stra00.pdf"
}
\end{chunk}
\index{Goguen, Joseph}
\index{Meseguer, Jose}
\begin{chunk}{axiom.bib}
@techreport{Gogu89,
author = "Goguen, Joseph and Meseguer, Jose",
title = "Order-sorted Algebra I : Equational Deduction for Multiple
Inheritance, Overloading, Exceptions, and Partial Operations",
type = "technical report",
institution = "SRI International",
year = "1989",
number = "SRIR 89-10"
}
\end{chunk}
\index{Hindley, R.}
\begin{chunk}{axiom.bib}
@article{Hind69,
author = "Hindley, R.",
title = "The Principal Type-Scheme of an Object in Combinatory Logic",
journal = "Trans. AMS",
volume = "146",
year = "1969",
pages = "29-60",
paper = "Hind69.pdf"
}
\end{chunk}
\index{Milner, Robin}
\begin{chunk}{axiom.bib}
@article{Miln78,
author = "Milner, Robin",
title = "A Theory of Type Polymorphism in Programming",
journal = "J. Computer and System Sciences",
volume = "17",
number = "3",
year = "1978",
pages = "348-375",
abstract =
"The aim of this work is largely a practical one. A widely employed
style of programming, particularly in structure-processing languages
which impose no discipline of types, entails defining procedures which
work well on objects of a wide variety. We present a formal type
discipline for such polymorphic procedures in the context of a simple
programming language, and a compile time type-checking algorithm which
enforces the discipline. A Semantic Soundness Theorem (based on a
formal semantics for the language) states that well-typed programs
cannot “go wrong” and a Syntactic Soundness Theorem states that if
accepts a program then it is well typed. We also discuss extending
these results to richer languages; a type-checking algorithm based on
is in fact already implemented and working, for the metalanguage ML in
the Edinburgh LCF system.",
paper = "Miln78.pdf"
}
\end{chunk}
\index{Damas, Luis}
\index{Milner, Robin}
\begin{chunk}{axiom.bib}
@inproceedings{Dama82,
author = "Damas, Luis and Milner, Robin",
title = "Principal Type-schemes for Functional Programs",
booktitle = "POPL 82",
pages = "207-212",
year = "1982",
isbn = "0-89798-065-6",
paper = "Dama82.pdf"
}
\end{chunk}
\index{Milner, R.}
\index{Tofte, M.}
\index{Harper, R.}
\begin{chunk}{axiom.bib}
@book{Miln90,
author = "Milner, Robin and Tofte, Mads and Harper, Robert",
title = "The Definition of Standard ML",
publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
link = "\url{http://sml-family.org/sml90-defn.pdf}",
year = "1990",
paper = "Miln90.pdf"
}
\end{chunk}
\index{Milner, R.}
\index{Tofte, M.}
\begin{chunk}{axiom.bib}
@book{Miln91,
author = "Milner, Robin and Tofte, Mads",
title = "Commentary on Standard ML",
publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
link = "\url{https://pdfs.semanticscholar.org/d199/16cbbda01c06b6eafa0756416e8b6f15ff44.pdf}",
year = "1991",
paper = "Miln91.pdf"
}
\end{chunk}
\index{Turner, D.A.}
\begin{chunk}{axiom.bib}
@article{Turn85,
author = "Turner, D. A.",
title = "Miranda: A non-strict functional language with polymorphic types",
journal = "Lecture Notes in Computer Science",
volume = "201",
pages = "1-16",
year = "1985",
link = "\url{http://miranda.org.uk/nancy.html}",
paper = "Turn85.pdf"
}
\end{chunk}
\index{Turner, D.A.}
\begin{chunk}{axiom.bib}
@article{Turn86,
author = "Turner, D. A.",
title = "An Overview of Miranda",
journal = "SIGPLAN Notices",
volume = "21",
number = "12",
pages = "158-166",
year = "1986",
link = "\url{http://miranda.org.uk/}",
paper = "Turn86.pdf"
}
\end{chunk}
\index{Foderaro, John K.}
\begin{chunk}{axiom.bib}
@phdthesis{Fode83,
author = "Foderaro, John K.",
title = "The Design of a Language for Algebraic Computation Systems",
school = "U.C. Berkeley, EECS Dept.",
year = "1983",
link = "\url{http://digitalassets.lib.berkeley.edu/techreports/ucb/text/CSD-83-160.pdf}",
abstract =
"This thesis describes the design of a language to support a
mathematics-oriented symbolic algebra system. The language, which we
have named NEWSPEAK, permits the complex interrelations of
mathematical types, such as rings, fields and polynomials to be
described. Functions can be written over the most general type that
has the required operations and properties and then inherited by
subtypes. All function calls are generic, with most function
resolution done at compile time. Newspeak is type-safe, yet permits
runtime creation of types.",
paper = "Fode83.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Fuh, You-Chin}
\index{Mishra, Prateek}
\begin{chunk}{axiom.bib}
@article{Fuhx90,
author = "Fuh, You-Chin and Mishra, Prateek",
title = "Type Inference with Subtypes",
journal = "Theoretical Computer Science",
volume = "73",
number = "2",
year = "1990",
pages = "155-175",
abstract =
"We extend polymorphic type inference with a very general notion of
subtype based on the concept of type transformation. This paper
describes the following results. We prove the existence of (i)
principal type property and (ii) syntactic completeness of the
type-checker, for type inference with subtypes. This result is
developed with only minimal assumptions on the underlying theory of
subtypes. As a consequence, it can be used as the basis for type
inference with a broad class of subtype theories. For a particular
“structural” theory of subtypes, those engendered by inclusions
between type constants only, we show that principal types are
compactly expressible. This suggests that type inference for the
structured theory of subtypes is feasible. We describe algorithms
necessary for such a system. The main algorithm we develop is called
MATCH, an extension to the classical unification algorithm. A proof of
correctness for MATCH is given.",
paper = "Fuhx90.pdf"
}
\end{chunk}
\index{Nipkow, Tobias}
\index{Snelting, Gregor}
\begin{chunk}{axiom.bib}
@inproceedings{Nipk91,
author = "Nipkow, Tobias and Snelting, Gregor",
title = "Type Classes and Overloading Resolution via Order-Sorted
Unification",
booktitle = "Proc 5th ACM Conf. Functional Prog. Lang. and Comp. Arch.",
year = "1991",
publisher = "Springer",
series = "LNCS",
volume = "523",
pages = "1-14",
abstract =
"We present a type inference algorithm for a Haskell-like language
based on order-sorted unification. The language features polymorphism,
overloading, type classes and multiple inheritance. Class and instance
declarations give rise to an order-sorted algebra of types. Type
inference essentially reduces to the Hindley/Milner algorithm where
unification takes place in this order-sorted algebra of types. The
theory of order-sorted unification provides simple sufficient
conditions which ensure the existence of principal types. The
semantics of the language is given by a translation into ordinary
lambda-calculus. We prove the correctness of our type inference
algorithm with respect to this semantics."
}
\end{chunk}
\index{Schmidt-Schauss, M.}
\begin{chunk}{axiom.bib}
@book{Schm89,
author = "Schmidt-Schauss, M.",
title = "Computational Aspects of an Order-Sorted Logic with Term
Declarations",
publisher = "Springer",
isbn = "978-3-540-51705-4",
year = "1989"
}
\end{chunk}
\index{Smolka, G.}
\index{Nutt, W.}
\index{Goguen, J.}
\index{Meseguer, J.}
\begin{chunk}{axiom.bib}
@InCollection{Smol89,
author = "Smolka, G. and Nutt, W. and Goguen, J. and Meseguer, J.",
title = "Order-sorted Equational Computation",
booktitle = "Resolution of Equations in Algebra Structures (Vol 2)",
pages = "297-367",
year = "1989"
}
\end{chunk}
\index{Waldmann, Uwe}
\begin{chunk}{axiom.bib}
@article{Wald92,
author = "Waldmann, Uwe",
title = "Semantics of Order-sorted Specifications",
journal = "Theoretical Computer Science",
volume = "94",
number = "1-2",
year = "1992",
pages = "1-35",
abstract =
"Order-sorted specifications (i.e. many-sorted specifications with
subsort relations) have been proved to be a useful tool for the
description of partially defined functions and error handling in
abstract data types.
Several definitions for order-sorted algebras have been proposed. In
some papers an operator symbol, which may be multiply declared, is
interpreted by a family of functions (“overloaded” algebras). In other
papers it is always interpreted by a single function (“nonoverloaded”
algebras). On the one hand, we try to demonstrate the differences
between these two approaches with respect to equality, rewriting and
completion; on the other hand, we prove that in fact both theories can
be studied in parallel provided that certain notions are suitably
defined.
The overloaded approach differs from the many-sorted and the
nonoverloaded one in that the overloaded term algebra is not
necessarily initial. We give a decidable sufficient criterion for the
initiality of the term algebra, which is less restrictive than
GJM-regularity as proposed by Goguen, Jouannaud and Meseguer.
Sort-decreasingness is an important property of rewrite systems since
it ensures that confluence and Church-Rosser property are equivalent,
that the overloaded and nonoverloaded rewrite relations agree, and
that variable overlaps do not yield critical pairs. We prove that it
is decidable whether or not a rewrite rule is sort-decreasing, even if
the signature is not regular.
Finally, we demonstrate that every overloaded completion procedure may
also be used in the nonoverloaded world, but not conversely, and that
specifications exist that can only be completed using the
nonoverloaded semantics.",
paper = "Wald92.pdf"
}
\end{chunk}
\index{Comon, Hubert}
\begin{chunk}{axiom.bib}
@inproceedings{Como90,
author = "Comon, Hubert",
title = "Equational Formulas in Order-sorted Algebras",
booktitle = "ICALP 90. Automata, Languages and Programming",
year = "1990",
pages = "674-688",
abstract =
"We propose a set of transformation rules for first order formulas
whose atoms are either equations between terms or “sort constraints” t
ε s where s is a regular tree language (or a sort in the algebraic
specification community). This set of rules is proved to be correct,
terminating and complete. This shows in particular that the first
order theory of any rational tree language is decidable, extending the
results of [Mal71,CL89,Mah88]. We also show how to apply our results
to automatic inductive proofs in equational theories."
}
\end{chunk}
\index{MacLane, Saunders}
\begin{chunk}{axiom.bib}
@book{Macl91,
author = "MacLane, Saunders",
title = "Categories for the Working Mathematician",
publisher = "Springer",
year = "1991",
isbn = "0-387-98403-8",
link = "\url{http://www.maths.ed.ac.uk/~aar/papers/maclanecat.pdf}",
paper = "Macl91.pdf"
}
\end{chunk}
\index{Schubert, Horst}
\begin{chunk}{axiom.bib}
@book{Schu72,
author = "Schubert, Horst",
title = "Categories",
publisher = "Springer-Verlag",
year = "1972"
}
\end{chunk}
\index{Freyd, Peter J.}
\index{Scedrov, Andre}
\begin{chunk}{axiom.bib}
@book{Frey90,
author = "Freyd, Peter J. and Scedrov, Andre",
title = "Categories, Allegories",
publisher = "Elsevier Science",
year = "1990",
isbn = "0-444-70368-3"
}
\end{chunk}
\index{Rydeheard, D. E.}
\index{Burstall, R. M.}
\begin{chunk}{axiom.bib}
@book{Ryde88,
author = "Rydeheard, D. E. and Burstall, R. M.",
title = "Computational Category Theory",
publisher = "Prentice Hall",
year = "1988",
isbn = "978-0131627369"
}
\end{chunk}
\index{Ehrig, Hartmut}
\index{Mahr, Bernd}
\begin{chunk}{axiom.bib}
@book{Ehri85,
author = "Ehrig, Hartmut and Mahr, Bernd",
title = "Fundamentals of Algebraic Specification 1: Equations and
Initial Semantics",
publisher = "Springer Verlag",
year = "1985",
isbn = "978-0387137186"
}
\end{chunk}
\index{Loos, Rudiger}
\begin{chunk}{axiom.bib}
@article{Loos72,
author = "Loos, Rudiger",
title = "Algebraic Algorithm Descriptions as Programs",
journal = "ACM SIGSAM Bulletin",
volume = "23",
year = "1972",
pages = "16-24",
abstract =
"We propose methods for writing algebraic programs in an algebraic
notation. We discuss the advantages of this approach and a specific
example",
paper = "Loos72.pdf"
}
\end{chunk}
\index{Loos, Rudiger}
\begin{chunk}{axiom.bib}
@article{Loos76,
author = "Loos, Rudiger",
title = "The Algorithm Description Language (ALDES) (report)",
journal = "ACM SIGSAM Bulletin",
volume = "10",
number = "1",
year = "1976",
pages = "14-38",
abstract =
"ALDES is a formalization of the method to describe algorithms used in
Knuth's books. The largest documentation of algebraic algorithms,
Collins' SAC system for Computer Algebra, is written in this
language. In contrast to PASCAL it provides automatic storage
deallocation. Compared to LISP equal emphasis was placed on efficiency
of arithmetic, list processing, and array handling. To allow the
programmer full control of efficiency all mechanisms of the system are
accessible to him. Currently ALDES is available as a preprocessor to
ANSI Fortran, using no additional primitives.",
paper = "Loos76.pdf"
}
\end{chunk}
\index{Loos, Rudiger}
\index{Collins, George E.}
\begin{chunk}{axiom.bib}
@book{Loos92,
author = "Loos, Rudiger and Collins, George E.",
title = "Revised Report on the Algorithm Language ALDES",
publisher = "Institut fur Informatik",
year = "1992"
}
\end{chunk}
\index{Collins, George E.}
\index{Loos, Rudiger}
\begin{chunk}{axiom.bib}
@techreport{Coll90,
author = "Collins, George E. and Loos, Rudiger",
title = "Specification and Index of SAC-2 Algorithms",
institution = "Univ. of Tubingen",
type = "technical report",
year = "1990",
number = "WSI-90-4"
}
\end{chunk}
\index{Buendgen, R.}
\index{Hagel, G.}
\index{Loos, R.}
\index{Seitz, S.}
\index{Simon, G.}
\index{Stuebner, R.}
\index{Weber, A.}
\begin{chunk}{axiom.bib}
@article{Buen91,
author = "Buendgen, R. and Hagel, G. and Loos, R. and Seitz, S. and
Simon, G. and Stuebner, R. and Weber, A.",
title = "SAC-2 in ALDES -- Ein Werkzeug fur die Algorithmenforschung",
journal = "MathPAD 1",
volume = "3",
year = "1991",
pages = "33-37"
}
\end{chunk}
\index{Weber, Andreas}
\begin{chunk}{axiom.bib}
@techreport{Webe92,
author = "Weber, Andreas",
title = "Structuring the Type System of a Computer Algebra System",
link = "\url{http://cg.cs.uni-bonn.de/personal-pages/weber/publications/pdf/WeberA/Weber92a.pdf}",
institution = "Wilhelm-Schickard-Institut fur Informatik",
year = "1992",
abstract = "
Most existing computer algebra systems are pure symbol manipulating
systems without language support for the occurring types. This is
mainly due to the fact that the occurring types are much more
complicated than in traditional programming languages. In the last
decade the study of type systems has become an active area of
research. We will give a proposal for a type system showing that
several problems for a type system of a symbolic computation system
can be solved by using results of this research. We will also provide
a variety of examples which will show some of the problems that remain
and that will require further research.",
paper = "Webe92b.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Bronstein, Manuel}
\begin{chunk}{axiom.bib}
@misc{Bron90,
author = "Bronstein, Manuel",
title =
"$\sum^{IT}$ -- A strongly-typed embeddable computer algebra library",
link = "\url{http://www-sop.inria.fr/cafe/Manuel.Bronstein/publications/mb_papers.html}",
abstract = "
We describe the new computer algebra library $\sum^{IT}$ and its
underlying design. The development of $\sum^{IT}$ is motivated by the
need to provide highly efficient implementations of key algorithms for
linear ordinary differential and ($q$)-difference equations to
scientific programmers and to computer algebra users, regardless of
the programming language or interactive system they use. As such,
$\sum^{IT}$ is not a computer algebra system per se, but a library (or
substrate) which is designed to be ``plugged'' with minimal efforts
into different types of client applications.",
paper = "Bron96.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Reynolds, John C.}
\begin{chunk}{axiom.bib}
@inproceedings{Reyo74,
author = "Reynolds, John C.",
title = "Towards a Theory of Type Structure",
booktitle = "Colloquim on Programming",
year = "1974",
pages = "9-11",
paper = "Reyo74.pdf"
}
\end{chunk}
\index{Chen, Kung}
\index{Hudak, Paul}
\index{Odersky, Martin}
\begin{chunk}{axiom.bib}
@inproceedings{Chen92,
author = "Chen, Kung and Hudak, Paul and Odersky, Martin",
title = "Parametric Type Classes",
booktitle = "Proc. ACM Conf. on LISP and Functional Programming",
year = "1992",
pages = "170-181",
abstract =
"We propose a generalization to Haskell's type classes where a class
can have type parameters besides the placeholder variable. We show
that this generalization is essential to represent container classes
with overloaded data constructor and selector operations. We also show
that the resulting type system has principal types and present
unification and type reconstruction algorithms.",
paper = "Chen92.pdf"
}
\end{chunk}
\index{Schoenfinkel, M.}
\begin{chunk}{axiom.bib}
@misc{Scho24,
author = "Schoenfinkel, M.",
title = "Uber die Bausteine der mathematischen Logik",
year = "1924",
pages = "305-316"
}
\end{chunk}
\index{Jones, Simon Peyton}
\begin{chunk}{axiom.bib}
@book{Jone87,
author = "Jones, Simon Peyton",
title = "The Implementation of Functional Programming Languages",
publisher = "Simon and Schuster",
year = "1987",
isbn = "0-13-453333-X",
paper = "Jone87.pdf"
}
\end{chunk}
\index{Leiss, Hans}
\begin{chunk}{axiom.bib}
@inproceedings{Leis87,
author = "Leiss, Hans",
title = "On Type Inference for Object-Oriented Programming Languages",
booktitle = "Int. Workshop on Computer Science Logic",
year = "1987",
pages = "151-172",
abstract =
"We present a type inference calculus for object-oriented programming
languages. Explicit polymorphic types, subtypes and multiple
inheritance are allowed. Class types are obtained by selection from
record types, but not considered subtypes of record types. The subtype
relation for class types reflects the (mathematically clean)
properties of subclass relations in object-oriented programming to a
better extend than previous systems did.
Based on Mitchells models for type inference, a semantics for types is
given where types are sets of values in a model of type-free lambda
calculus. For the sublanguage without type quantifiers and subtype
relation, automatic type inference is possible by extending Milners
algorithm W to deal with a polymorphic fixed-point rule.",
}
\end{chunk}
\index{Kfoury, A.J.}
\index{Tiuryn, J.}
\index{Utzyczyn, P.}
\begin{chunk}{axiom.bib}
@inproceedings{Kfou88,
author = "Kfoury, A.J. and Tiuryn, J. and Utzyczyn, P.",
title = "A Proper Extension of ML with an Effective Type-Assignment",
booktitle = "POPL 88",
year = "1988",
pages = "58-69",
abstract =
"We extend the functional language ML by allowing the recursive calls
to a function F on the right-hand side of its definition to be at
different types, all generic instances of the (derived) type of F on
the left-hand side of its definition. The original definition of ML
does not allow this feature. This extension does not produce new types
beyond the usual universal polymorphic types of ML and satisfies the
properties already enjoyed by ML: the principal-type property and the
effective type-assignment property.",
paper = "Kfou88.pdf"
}
\end{chunk}
\index{Tiuryn, J.}
\begin{chunk}{axiom.bib}
@article{Tiur90,
author = "Tiuryn, J.",
title = "Type Inference Problems -- A Survey",
journal = "LNCS",
volume = "452",
pages = "105-120",
year = "1990",
paper = "Tiur90.pdf"
}
\end{chunk}
\index{Kfoury, A. J.}
\index{Tiuryn, J.}
\index{Urzyczyn, P.}
\begin{chunk}{axiom.bib}
@article{Kfou93,
author = "Kfoury, A. J. and Tiuryn, J. and Urzyczyn, P.",
title = "The Undecidability of the Semi-unification Problem",
journal = "Information and Computation",
volume = "102",
number = "1",
year = "1993",
pages = "83-101",
abstract =
"The Semi-Unification Problem (SUP) is a natural generalization of
both first-order unification and matching. The problem arises in
various branches of computer science and logic. Although several
special cases of SUP are known to be decidable, the problem in general
has been open for several years. We show that SUP in general is
undecidable, by reducing what we call the "boundedness problem" of
Turing machines to SUP. The undecidability of this boundedness problem
is established by a technique developed in the mid-1960s to prove
related results about Turing machines.",
paper = "Kfou93.pdf"
}
\end{chunk}
\begin{chunk}{axiom.bib}
@misc{Comp17,
author = "CompCert",
title = "The CompCert Formally Certified C Compiler",
year = "2017",
link = "\url{http://compcert.inria.fr}"
}
\end{chunk}
\index{Hudak, Paul}
\index{Peterson, John}
\index{Fasel, Joseph H.}
\begin{chunk}{axiom.bib}
@misc{Huda99,
author = "Hudak, Paul and Peterson, John and Fasel, Joseph H.",
title = "A Gentle Introduction to Haskell 98",
year = "1999",
link = "\url{https://www.haskell.org/tutorial/haskell-98-tutorial.pdf}",
paper = "Huda99.pdf"
}
\end{chunk}
\index{Faxen, Karl-Filip}
\begin{chunk}{axiom.bib}
@article{Faxe02,
author = "Faxen, Karl-Filip",
title = "A Static Semantics for Haskell",
year = "2002",
journal = "J. Functional Programming",
volume = "12",
number = "4-5",
pages = "295-357",
abstract =
"This paper gives a static semantics for Haskell 98, a non-strict
purely functional programming language. The semantics formally
specifies nearly all the details of the Haskell 98 type system, including the
resolution of overloading, kind inference (including defaulting) and
polymorphic recursion, the only major omission being a proper
treatment of ambiguous overloading and its resolution. Overloading is
translated into explicit dictionary passing, as in all current
implementations of Haskell. The target language of this translation is
a variant of the Girard-Reynolds polymorphic lambda calculus featuring
higher order polymorphism and explicit type abstraction and
application in the term language. Translated programs can thus still
be type checked, although the implicit version of this system is
impredicative. A surprising result of this formalization effort is that
the monomorphism restriction, when rendered in a system of inference
rules, compromises the principal type property.",
paper = "Faxe02.pdf"
}
\end{chunk}
\index{MacLane, Saunders}
\begin{chunk}{axiom.bib}
@book{Macl92,
author = "MacLane, Saunders",
title = "Sheaves in Geometry and Logic: A First Introduction to Topos
Theory",
year = "1992",
isbn = "978-0-387-97710-2",
publisher = "Springer"
}
\end{chunk}
\index{Manes, Ernest G.}
\begin{chunk}{axiom.bib}
@book{Mane76,
author = "Manes, Ernest G.",
title = "Algebraic Theories",
publisher = "Springer",
year = "1976",
series = "Graduate Texts in Mathematics",
isbn = "978-1-4612-9860-1"
}
\end{chunk}
\index{Paterson, M. S.}
\begin{chunk}{axiom.bib}
@article{Pate78,
author = "Paterson, M. S.",
title = "Linear Unification",
journal = "J. Computer and System Sciences",
volume = "16",
number = "2",
year = "1978",
pages = "158-167",
abstract =
"A unification algorithm is described which tests a set of expressions
for unifiability and which requires time and space which are only linear
in the size of the input",
paper = "Pate78.pdf"
}
\end{chunk}
\index{Kanellakis, Paris C.}
\index{Mairson, Harry G.}
\index{Mitchell, John C.}
\begin{chunk}{axiom.bib}
@techreport{Kane90,
author = "Kanellakis, Paris C. and Mairson, Harry G. and Mitchell, John C.",
title = "Unification and ML Type Reconstruction",
link = "\url{ftp://ftp.cs.brown.edu/pub/techreports/90/cs90-26.pdf}",
institution = "Brown University",
year = "1990",
number = "CS-90-26",
abstract =
"We study the complexity of type reconstruction for a core fragment of
ML with lambda abstraction, function application, and the polymorphic
{\bf let} declaration. We derive exponential upper and lower bounds on
recognizing the typable core ML expressions. Our primary technical
tool is unification of succinctly represented type expressions. After
observing that core ML expressions, of size $n$, can be typed in
DTIME($s^n$), we exhibit two different families of programs whose
principal types grow exponentially. We show how to exploit the
expressiveness of the {\bf let}-polymorphism in these constructions to
derive lower bounds on deciding typability: one leads naturally to
NP-hardness and the other to DTIME($2^{n^k}$)-hardness for each integer
$k\ge 1$. Our generic simulation of any exponential time Turing
Machine by ML type reconstruction may be viewed as a nonstandard way
of computing with types. Our worse-case lower bounds stand in contrast
to practical experience, which suggests that commonly used algorithms
for type reconstruction do not slow compilation substantially.",
paper = "Kane90.pdf"
}
\end{chunk}
\index{Volpano, Dennis M.}
\index{Smith, Geoffrey S.}
\begin{chunk}{axiom.bib}
@techreport{Volp91,
author = "Volpano, Dennis M. and Smith, Geoffrey S.",
title = "On the Complexity of ML Typability with Overloading",
institution = "Cornell University",
year = "1991",
number = "TR91-1210",
abstract =
"We examine the complexity of type checking in an ML-style type system
that permits functions to be overloaded with different types. In
particular, we consider the extension of the ML Type system proposed
by Wadler and Blott in the appendix of [WB89], with global overloading
only, that is, where the only overloading is that which exists in an
initial type assumption set; no local overloading via over and inst
expressions is allowed. It is shown that under a correct notion of
well-typed terms, the problem of determining whether a term is well
typed with respect to an assumption set in this system is
undecidable. We then investigate limiting recursion in assumption
sets, the source of the undecidability. Barring mutual recursion is
considered, but this proves too weak, for the problem remains
undecidable. Then we consider a limited form of recursion called
parametric recursion. We show that although the problem becomes
decidable under parametric recursion, it appears harder than
conventional ML typability, which is complete for DEXPTIME [Mai90].",
paper = "Volp91.pdf"
}
\end{chunk}
\index{Hodges, Wilfrid}
\begin{chunk}{axiom.bib}
@article{Hodg95,
author = "Hodges, Wilfrid",
title = "The Meaning of Specifications I: Domains and Initial Models",
journal = "Theoretical Computer Science",
volume = "192",
number = "1",
year = "1995",
pages = "67-89",
abstract =
"This is the first of a short series of papers intended to provide one
common semantics for several different types of specification
language, in order to allow comparison and translations. The
underlying idea is that a specification describes the behaviour of a
system, depending on parameters. We can represent this behaviour as a
functor which acts on structures representing the parameters, and
which yields a structure representing the behaviour. We characterise
in domain-theoretic terms the class of functors which could in
principle be specified and implemented; briefly, they are the functors
which preserve directed colimits and whose restriction to finitely
presented structures is recursively enumerable. We also characterise
those functors which allow specification by initial semantics in
universal Horn classes with finite vocabulary; these functors consist
of a free functor (i.e. left adjoint of a forgetful functor) followed
by a forgetful functor. The main result is that these two classes of
functor are the same up to natural isomorphism.",
paper = "Hodg95.pdf"
}
\end{chunk}
\index{Graetzer, George}
\begin{chunk}{axiom.bib}
@book{Grae79,
author = "Graetzer, George",
title = "Universal Algebra",
publisher = "Springer",
isbn = "978-0-387-77486-2",
year = "1979",
paper = "Grae79.pdf"
}
\end{chunk}
\index{Dershowitz, Nachum}
\index{Jouannaud, Jean-Pierre}
\begin{chunk}{axiom.bib}
@techreport{Ders89,
author = "Dershowitz, Nachum and Jouannaud, Jean-Pierre",
title = "Rewrite Systems",
year = "1989",
number = "478",
institution = "Laboratoire de Recherche en Informatique",
paper = "Ders89.pdf"
}
\end{chunk}
\index{Chang, C.C.}
\index{Keisler, H. Jerome}
\begin{chunk}{axiom.bib}
@book{Chan90,
author = "Chang, C.C. and Keisler, H. Jerome",
title = "Model Theory",
publisher = "North Holland",
year = "1990",
comment = "Studies in Logic and the Foundations of Mathematics",
volume = "73",
abstract =
"Since the second edition of this book (1977), Model Theory has
changed radically, and is now concerned with fields such as
classification (or stability) theory, nonstandard analysis,
model-theoretic algebra, recursive model theory, abstract model
theory, and model theories for a host of nonfirst order logics. Model
theoretic methods have also had a major impact on set theory,
recursion theory, and proof theory.
This new edition has been updated to take account of these changes,
while preserving its usefulness as a first textbook in model
theory. Whole new sections have been added, as well as new exercises
and references. A number of updates, improvements and corrections have
been made to the main text",
}
\end{chunk}
\index{Goguen, Joseph A.}
\index{Winkler, Timothy}
\index{Meseguer, Jose}
\index{Futatsugi, Kokichi}
\index{Jouannaud, Jean-Pierre}
\begin{chunk}{axiom.bib}
@techreport{Gogu92,
author = "Goguen, Joseph A. and Winkler, Timothy and Meseguer, Jose and
Futatsugi, Kokichi and Jouannaud, Jean-Pierre",
title = "Introducing OBJ",
institution = "SRI International",
number = "SRI-CSL-92-03",
year = "1992",
abstract =
"This is an introduction to OBJ, describing its philosophy, its
syntax, and aspects of its semantics, both logical and operational,
with many examples, based on Release 2.0 of OBJ3. OBJ is a wide
spectrum first-order functional language that is rigorously based upon
equational logic. This semantic basis supports a declarative,
specificational style, facilitates program verification, and allows
OBJ to be used as a theorem prover. OBJ3 is based upon order-sorted
equational logic, which provides a notion of subsort that rigorously
supports multiple inheritance, exception handling and
overloading. OBJ3 also provides parameterized programming, a technique
which provides powerful support for design, verification, reuse, and
maintenance.
This facility is based on using two kinds of module: objects to
encapsulate executable code, and in particular to define abstract data
types by initial algebra semantics; and theories to specify both
syntactic structure and semantic properties for modules and module
interfaces. Each kind of module can be parameterized, where actual
parameters are modules. For parameter instantiation, a view binds the
formal entities in an interface theory to actual entities in a module,
and also asserts that the target module satisfies the semantic
requirements of the interface theory. Module expressions allow complex
combinations of already defined modules, including sums,
instantiations, and transformations; moreover, evaluating a module
expression actually constructs the described software (sub)system from
the given components.
Default views can greatly reduce the effort of instantiating
modules. We argue that first-order parameterized programming includes
much of the power of higher-order programming. Although OBJ executable
code normally consists of equations that are interpreted as rewrite
rules, OBJ objects can also encapsulate Lisp code, e.g., to provide
efficient built-in data types, or to augment the system with new
capabilities; we describe the syntax of the facility, and provide some
examples. In addition, OBJ provides rewriting modulo associative,
commutative and/or identity equations, as well as user-definable
evaluation strategies that allow lazy, eager, and mixed evaluation
strategies on an operation-by-operation basis; memoization [sic] is
also available on an operation-by-operation basis. Finally, OBJ
provides user-definable mixfix syntax, which supports using the
notational conventions of particular application domains.",
paper = "Gogu92.pdf"
}
\end{chunk}
\index{Limongelli, C.}
\index{Temperini, M.}
\begin{chunk}{axiom.bib}
@article{Limo92,
author = "Limongelli, C. and Temperini, M.",
title = "Abstract Specification of Structures and Methods in Symbolic
Mathematical Computation",
journal = "Theoretical Computer Science",
volume = "104",
year = "1992",
pages = "89-107",
abstract =
"This paper describes a methodology based on the object-oriented
programming paradigm, to support the design and implementation of a
symbolic computation system. The requirements of the system are
related to the specification and treatment of mathematical
structures. This treatment is considered from both the numerical and
the symbolic points of view. The resulting programming system should
be able to support the formal definition of mathematical data
structures and methods at their highest level of abstraction, to
perform computations on instances created from such definitions, and
to handle abstract data structures through the manipulation of their
logical properties. Particular consideration is given to the
correctness aspects. Some examples of convenient application of the
proposed design methodology are presented.",
paper = "Limo92.pdf"
}
\end{chunk}
\index{Breazu-Tannen, V.}
\index{Coquand, T.}
\index{Gunter, C.A.}
\index{Scedrov, A.}
\begin{chunk}{axiom.bib}
@inproceedings{Brea89,
author = "Breazu-Tannen, V. and Coquand, T. and Gunter, C.A. and
Scedrov, A.",
title = "Inheritance and Explicit Coercion",
booktitle = "Logic in Computer Science",
year = "1989",
isbn = "0-8186-1954-6",
abstract =
"A method is presented for providing semantic interpretations for
languages which feature inheritance in the framework of statically
checked, rich type disciplines. The approach is illustrated by an
extension of the language Fun of L. Cardelli and P. Wegner (1985),
which is interpreted via a translation into an extended polymorphic
lambda calculus. The approach interprets inheritances in Fun as
coercion functions already definable in the target of the
translation. Existing techniques in the theory of semantic domains can
then be used to interpret the extended polymorphic lambda calculus,
thus providing many models for the original language. The method
allows the simultaneous modeling of parametric polymorphism, recursive
types, and inheritance, which has been regarded as problematic because
of the seemingly contradictory characteristics of inheritance and type
recursion on higher types. The main difficulty in providing
interpretations for explicit type disciplines featuring inheritance is
identified. Since interpretations follow the type-checking
derivations, coherence theorems are required, and the authors prove
them for their semantic method.",
paper = "Brea89.pdf"
}
\end{chunk}
\index{Breazu-Tannen, Val}
\index{Coquand, Thierry}
\index{Gunter, Carl A.}
\index{Scedrov, Andre}
\begin{chunk}{axiom.bib}
@article{Brea91,
author = "Breazu-Tannen, Val and Coquand, Thierry and Gunter, Carl A. and
Scedrov, Andre",
title = "Inheritance as Implicit Coercion",
journal = "Information and Computation",
volume = "93",
number = "1",
year = "1991",
pages = "172-221",
abstract =
"We present a method for providing semantic interpretations for
languages with a type system featuring inheritance polymorphism. Our
approach is illustrated on an extension of the language Fun of
Cardelli and Wegner, which we interpret via a translation into an
extended polymorphic lambda calculus. Our goal is to interpret
inheritances in Fun via coercion functions which are definable in the
target of the translation. Existing techniques in the theory of
semantic domains can be then used to interpret the extended
polymorphic lambda calculus, thus providing many models for the
original language. This technique makes it possible to model a rich
type discipline which includes parametric polymorphism and recursive
types as well as inheritance. A central difficulty in providing
interpretations for explicit type disciplines featuring inheritance in
the sense discussed in this paper arises from the fact that programs
can type-check in more than one way. Since interpretations follow the
type-checking derivations, coherence theorems are required: that is,
one must prove that the meaning of a program does not depend on the
way it was type-checked. Proofs of such theorems for our proposed
interpretation are the basic technical results of this
paper. Interestingly, proving coherence in the presence of recursive
types, variants, and abstract types forced us to reexamine fundamental
equational properties that arise in proof theory (in the form of
commutative reductions) and domain theory (in the form of strict
vs. non-strict functions).",
paper = "Brea91.pdf"
}
\end{chunk}
\index{Smolka, G.}
\begin{chunk}{axiom.bib}
@phdthesis{Smol89a,
author = "Smolka, G.",
title = "Logic Programming over Polymorphically Order-Sorted Types",
school = "Fachbereich Informatik, Universitat Kaiserslautern",
year = "1989"
}
\end{chunk}
\index{Wirsing, Martin}
\index{Broy, Manfred}
\begin{chunk}{axiom.bib}
@inproceedings{Wirs82,
author = "Wirsing, Martin and Broy, Manfred",
title = "An Analysis of Semantic Models for Algebraic Specifications",
booktitle = "Theoretical Foundations of Programming Methodology",
year = "1982",
publisher = "Springer",
pages = "351-413",
isbn = "978-94-009-7893-5",
abstract =
"Data structures, algorithms and programming languages can be
described in a uniform implementation-independent way by axiomatic
abstract data types i.e. by algebraic specifications defining
abstractly the properties of objects and functions. Different semantic
models such as initial and terminal algebras have been proposed in
order to specify the meaning of such specifications -often involving a
considerable amount of category theory. A more concrete semantics
encompassing these different approaches is presented:
Abstract data types are specified in hierarchies, employing
``primitive'' types on which other types are based. The semantics is
defined to be the class of all partial heterogeneous algebras
satisfying the axioms and respecting the hierarchy. The interpretation
of a specification as its initial or terminal algebra is just a
constraint on the underlying data. These constraints can be modified
according to the specification goals. E.g. the data can be specified
using total functions; for algorithms partial functions with
syntactically checkable domains seem appropriate whereas for
programming languages the general notion of partiality is needed,
Model-theoretic and deduction-oriented conditions are developed which
ensure properties leading to criteria for the soundness and complexity
of specifications. These conditions are generalized to parameterized
types, i.e. type procedures mapping types into types. Syntax and
different semantics of parameter are defined and discussed. Criteria
for proper parameterized specifications are developed. It is shown
that the properties of proper specifications viz. of snowballing and
impeccable types are preserved under application of parameterized
types — finally guaranteeing that the composition of proper small
specifications always leads to a proper large specification."
}
\end{chunk}
\begin{chunk}{axiom.bib}
@misc{GAPx17,
author = "The GAP Group",
title = "GAP - Reference Manual",
year = "2017",
link = "\url{https://www.gap-system.org/Manuals/doc/ref/manual.pdf}"
}
\end{chunk}
\index{Char, Bruce}
\index{Geddes, Keith O.}
\index{Gonnet, Gaston H.}
\index{Leong, Benton}
\index{Monagan, Michael B.}
\index{Watt, Stephen M.}
\begin{chunk}{axiom.bib}
@book{Char91a,
author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
title = "Maple V Library Reference Manual",
publisher = "Springer",
year = "1991",
isbn = "978-1-4757-2133-1",
abstract =
"The design and implementation of the Maple system is an on-going
project of the Symbolic Computation Group at the University of
Waterloo in Ontario, Canada. This manual corresponds with version V
(roman numeral five) of the Maple system. The on-line help subsystem
can be invoked from within a Maple session to view documentation on
specific topics. In particular, the command ?updates points the user
to documentation updates for each new version of Maple. The Maple
project was first conceived in the autumn of 1980, growing out of
discussions on the state of symbolic computation at the University of
Waterloo. The authors wish to acknowledge many fruitful discussions
with colleagues at the University of Waterloo, particularly Morven
Gentleman, Michael Malcolm, and Frank Tompa. It was recognized in
these discussions that none of the locally-available systems for
symbolic computation provided the facilities that should be expected
for symbolic computation in modern computing environments. We
concluded that since the basic design decisions for the then-current
symbolic systems such as ALTRAN, CAMAL, REDUCE, and MACSYMA were based
on 1960's computing technology, it would be wise to design a new
system ``from scratch''. Thus we could take advantage of the software
engineering technology which had become available in recent years, as
well as drawing from the lessons of experience. Maple's basic features
(elementary data structures, Input/output, arithmetic with numbers,
and elementary simplification) are coded in a systems programming
language for efficiency."
}
\end{chunk}
\index{Monk, J. Donald}
\begin{chunk}{axiom.bib}
@book{Monk76,
author = "Monk, J. Donald",
title = "Mathematical Logic",
publisher = "Springer",
year = "1976",
isbn = "978-1-4684-9452-5"
}
\end{chunk}
\index{Meyer, Albert R.}
\index{Reinhold, Mark B.}
\begin{chunk}{axiom.bib}
@inproceedings{Meye86,
author = "Meyer, Albert R. and Reinhold, Mark B.",
title = "Type is not a type",
booktitle = "POPL 86",
year = "1986",
pages = "287-295",
abstract =
"A function has a dependent type when the type of its result
depends upon the value of its argument. Dependent types originated in
the type theory of intuitionistic mathematics and have reappeared
independently in programming languages such as CLU, Pebble, and
Russell. Some of these languages make the assumption that there exists
a type-of-all-types which is its own type as well as the type
of all other types. Girard proved that this approach is inconsistent
from the perspective of intuitionistic logic. We apply Girard's
techniques to establish that the type-of-all-types assumption creates
serious pathologies from a programming perspective: a system using
this assumption is inherently not normalizing, term equality is
undecidable, and the resulting theory fails to be a conservative
extension of the theory of the underlying base types. The failure of
conservative extension means that classical reasoning about programs
in such a system is not sound.",
}
\end{chunk}
\index{Howe, Douglas J.}
\begin{chunk}{axiom.bib}
@techreport{Howe87,
author = "Howe, Douglas J.",
title = "The Computational Behaviour of Girard's Paradox",
institution = "Cornell University",
year = "1987",
link = "\url{https://ecommons.cornell.edu/handle/1813/6660}",
number = "TR 87-820",
abstract =
"In their paper ``Type'' Is Not a Type, Meyer and Reinhold argued that
serious pathologies can result when a type of all types is added to a
programing language with dependent types. Central to their argument is
the claim that by following the proof of Girard's paradox it is
possible to construct in their calculus $\lambda^{\tau \tau}$ a term
having a fixed-point property. Because of the tremendous amount of
formal detail involved, they were unable to establish this claim. We
have made use of the Nuprl proof development system in constructing a
formal proof of Girard's paradox and analysing the resulting term. We
can show that the term does not have the desired fixed-point property,
but does have a weaker form of it that is sufficient to establish some
of the results of Meyer and Reinhold. We believe that the method used
here is in itself of some interest, representing a new kind of
application of a computer to a problem in symbolic logic.",
}
\end{chunk}
\index{Coquand, T.}
\begin{chunk}{axiom.bib}
@techreport{Coqu86,
author = "Coquand, Thierry",
title = "An Analysis of Girard's Paradox",
institution = "Institut National de Recherche en Informatique et en
Automatique",
year = "1986",
abstract =
"We study the consistency of a few formal systems, specially some
extensions of Church's calculus and the construction system. We show
that Church's calculus is not compatible with the notion of
second-order type. We apply this result for showing that the calculus
of construction with four levels is inconsistent. We suggest finally
some consistent extensions of these two calculi.",
paper = "Coqu86.pdf"
}
\end{chunk}
\index{Cardelli, Luca}
\begin{chunk}{axiom.bib}
@article{Card88,
author = "Cardelli, Luca",
title = "A Semantics of Multiple Inheritance",
journal = "Information and Computation",
volume = "76",
number = "2-3",
year = "1988",
pages = "138-164",
paper = "Card88.pdf"
}
\end{chunk}
\index{Godel, Kurt}
\begin{chunk}{axiom.bib}
@article{Gode58,
author = "Godel, Kurt",
title = {\"Uber eine bisher noch nicht benutzte Erweiterung des Finiten
Standpunktes},
journal = "Dialectica",
volume = "12",
year = "1958",
pages = "280-287"
}
\end{chunk}
\index{Girard, Jean-Yves}
\index{Taylor, Paul}
\index{Lafont, Yves}
\begin{chunk}{axiom.bib}
@book{Gira89,
author = "Girard, Jean-Yves and Taylor, Paul and Lafont, Yves",
title = "Proofs and Types",
publisher = "Cambridge University Press",
year = "1989"
}
\end{chunk}
\index{Pierce, Benjamin C.}
\begin{chunk}{axiom.bib}
@phdthesis{Pier91,
author = "Pierce, Benjamin C.",
title = "Programming with Intersection Types and Bounded Polymorphism",
school = "Carnegie Mellon University",
year = "1991",
comment = "CMU-CS-91-205",
abstract =
"Intersection types and bounded quantification are complementary
mechanisms for extending the expressive power of statically typed
programming languages. They begin with a common framework: a simple,
typed language with higher-order functions and a notion of subtyping.
Intersection types extend this framework by giving every pair of types
$\sigma$ and $\tau$ a greatest lower bound, $\sigma \land \tau$,
corresponding intuitively to the intersection of the sets of values
described by $\sigma$ and $\tau$. Bounded quantification extends the
basic framework along a different axis by adding polymorphic functions
that operate uniformly on all the subtypes of a given type. This thesis
unifies and extends prior work on intersection types and bounded
quantification, previously studied only in isolation, by investigating
theoretical and practical aspects of a typed $\lambda$-calculus
incorporating both.
The practical utility of this calculus, called $F_\land$ is
established by examples showing, for instance, that it allows a rich
form of ``coherent overloading'' and supports an analog of abstract
interpretation during typechecking; for example, the addition function
is given a type showing that it maps pairs of positive inputs to a
positive result, pairs of zero inputs to a zero result, etc. More
familiar programming examples are presented in terms of an extension
of Forsythe (an Algol-like language with intersection types),
demonstrating how parametric polymorphism can be used to simplify and
generalize Forsythe's design. We discuss the novel programming and
debugging styles that arise in $F_\land$.
We prove the correctness of a simple semi-decision procedure for the
subtype relation and the partial correctness of an algorithm for
synthesizing minimal types of $F_\land$ terms. Our main tool in this
analysis is a notion of ``canonical types,'' which allows proofs to be
factored so that intersections are handled separately from the other
type constructors.
A pair of negative results illustrates some subtle complexities of
$F_\land$. First, the subtype relation of $F_\land$ is shown to be
undecidable; in fact, even the subtype relation of pure second-order
bounded quantification is undecidable, a surprising result in its own
right. Second, the failure of an important technical property of the
subtype relation -- the existence of least upper bounds -- indicates
that typed semantic models of $F_\land$ will be more difficult to
construct and analyze than the known typed models of intersection
types. We propose, for future study, some simpler fragments of
$F_\land$ that share most of its essential features, while recovering
decidability and least upper bounds.
We study the semantics of $F_\land$ from several points of view. An
untyped model based on partial equivalence relations demonstrates the
consistency of the typing rules and provides a simple interpretation
for programs, where ``$\sigma$ is a subtype of $\tau$'' is read as
``$\sigma$ is a subset of $\tau$.'' More refined models can be
obtained using a translation from $F_\land$ into the pure polymorphic
$\lambda$-calculus; in these models, ``$\sigma$ is a subtype of
$\tau$'' is interpreted by an explicit coercion function from $\sigma$
to $\tau$. The nonexistence of least upper bounds shows up here in
the failure of known techniques for proving the coherence of the
translation semantics. Finally, an equational theory of equivalences
between $F_\land$ terms is presented and its soundness for both styles
of model is verified.",
paper = "Pier91.pdf"
}
\end{chunk}
\index{Pierce, Benjamin C.}
\begin{chunk}{axiom.bib}
@techreport{Pier91a,
author = "Pierce, Benjamin C.",
title = "Bounded Quantification is Undecidable",
year = "1991",
number = "CMU-CS-91-161",
institution = "Carnegie Mellon University",
link = "\url{http://repository.cmu.edu/cgi/viewcontent.cgi?article=3059}",
abstract =
"$F_\le$ is a typed $\lambda$-calculus with subtyping and bounded
second-order polymorphism. First introduced by Cardelli and Wegner, it
has been widely studied as a core calculus for type systems with
subtyping.
Curien and Ghelli proved the partial correctness of a recursive
procedure for computing minimal types of $F_\le$ terms and showed
that the termination of this procedure is equivalent to the
termination of its major component, a procedure for checking the
subtype relation between $F_\le$ types. Ghelli later claimed that
this procedure is also guaranteed to terminate, but the discovery of a
subtle bug in his proof led him recently to observe that, in fact,
there are inputs on which the subtyping procedure diverges. This
reopens the question of the decidability of subtyping and hence of
typechecking.
This question is settled here in the negative, using a reduction from
the halting problem for two-counter Turing machines to show that the
subtype relation of $F_\le$ is undecidable.",
paper = "Pier91a.pdf"
}
\end{chunk}
\index{Meyer, Bertrand}
\begin{chunk}{axiom.bib}
@book{Meye97,
author = "Meyer, Bertrand",
title = "Object-Oriented Software Construction",
year = "1997",
publisher = "Prentice Hall"
}
\end{chunk}
\index{Goldberg, Adele}
\index{Robson, David}
\begin{chunk}{axiom.bib}
@book{Gold83,
author = "Goldberg, Adele and Robson, David",
title = "Smalltalk-80: The Language and Its Implementation",
publisher = "Addison-Wesley",
year = "1983"
}
\end{chunk}
\index{Kirkerud, Bjorn}
\begin{chunk}{axiom.bib}
@book{Kirk89,
author = "Kirkerud, Bjorn",
title = "Object-Oriented Programming With Simula",
year = "1989",
series = "International Computer Science Series",
publisher = "Addison-Wesley"
}
\end{chunk}
\index{Birtwistle, Graham M.}
\begin{chunk}{axiom.bib}
@book{Birt80,
author = "Birtwistle, Graham M.",
title = "Simula Begin",
year = "1980",
publisher = "Chartwell-Bratt",
isbn = "9780862380090"
}
\end{chunk}
\index{Stroustrup, Bjarne}
\begin{chunk}{axiom.bib}
@book{Stro95,
author = "Stroustrup, Bjarne",
title = "The C++ Programming Language (2nd Edition)",
publisher = "Addison-Wesley",
year = "1995",
isbn = "0-201-53992-6"
}
\end{chunk}
\index{Bruce, Kim B.}
\begin{chunk}{axiom.bib}
@inproceedings{Bruc93,
author = "Bruce, Kim B.",
title = "Safe type checking in a statically-typed object-oriented
programming language",
booktitle = "POPL 93",
year = "1993",
isbn = "0-89791-560-7",
pages = "285-298",
abstract =
" In this paper we introduce a statically-typed, functional,
object-oriented programming language, TOOPL, which supports classes,
objects, methods, instance variable, subtypes, and inheritance. It has
proved to be surprisingly difficult to design statically-typed
object-oriented languages which are nearly as expressive as Smalltalk
and yet have no holes in their typing systems. A particular problem
with statically type checking object-oriented languages is determining
whether a method provided in a superclass will continue to type check
when inherited in a subclass. This problem is solved in our language
by providing type checking rules which guarantee that a method which
type checks as part of a class will type check correctly in all legal
subclasses in which it is inherited. This feature enables library
providers to provide only the interfaces of classes with executables
and still allow users to safely create subclasses. The design of TOOPL
has been guided by an analysis of the semantics of the language, which
is given in terms of a sufficiently rich model of the F-bounded
second-order lambda calculus. This semantics supported the language
design by providing a means of proving that the type-checking rules
for the language are sound, ensuring that well-typed terms produce
objects of the appropriate type. In particular, in a well-typed
program it is impossible to send a message to an object which lacks a
corresponding method.",
paper = "Bruc93.pdf"
}
\end{chunk}
\index{Abdali, S. Kamal}
\index{Cherry, Guy W.}
\index{Soiffer, Neil}
\begin{chunk}{axiom.bib}
@inproceedings{Abda86,
author = "Abdali, S. Kamal and Cherry, Guy W. and Soiffer, Neil",
title = "A Smalltalk System for Algebraic Manipulation",
booktitle = "OOPSLA 86",
pages = "277-293",
year = "1986",
abstract =
"This paper describes the design of an algebra system Views
implemented in Smalltalk. Views contains facilities for dynamic
creation and manipulation of computational domains, for viewing these
domains as various categories such as groups, rings, or fields, and
for expressing algorithms generically at the level of categories. The
design of Views has resulted in the addition of some new abstractions
to Smalltalk that are quite useful in their own right. Parameterized
classes provide a means for run-time creation of new classes that
exhibit generally very similar behavior, differing only in minor ways
that can be described by different instantiations of certain
parameters. Categories allow the abstraction of the common behavior of
classes that derives from the class objects and operations satisfying
certain laws independently of the implementation of those objects and
operations. Views allow the run-time association of classes with
categories (and of categories with other categories), facilitating the
use of code written for categories with quite different
interpretations of operations. Together, categories and views provide
an additional mechanism for code sharing that is richer than both
single and multiple inheritance. The paper gives algebraic as well as
non-algebraic examples of the above-mentioned features.",
paper = "Abda86.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Berger, Emery}
\begin{chunk}{axiom.bib}
@techreport{Berg92,
author = "Berger, Emery",
title = "FP + OOP = Haskell",
institution = "University of Texas",
number = "TR-92-30",
year = "1992",
abstract =
"The programming language Haskell adds object-oriented functionality
(using a concept known as type classes) to a pure functional
programming framework. This paper describes these extensions and
analyzes its accomplishments as well as some problems."
}
\end{chunk}
\index{Barendregt, H. P.}
\begin{chunk}{axiom.bib}
@book{Bare84,
author = "Barendregt, H. P.",
title = "The Lambda Calculus: Its Syntax and Semantics",
publisher = "Elsevier Science",
year = "1984"
}
\end{chunk}
\index{Goguen, Joseph}
\index{Meseguer, Jose}
\begin{chunk}{axiom.bib}
@article{Gogu92,
author = "Goguen, Joseph and Meseguer, Jose",
title = "Order-sorted Algebra I : Equational Deduction for Multiple
Inheritance, Overloading, Exceptions, and Partial Operations",
journal = "Theoretical Computer Science",
volume = "105",
number = "2",
year = "1992",
pages = "217-273",
abstract =
"This paper generalizes many-sorted algebra (MSA) to order-sorted
algebra (OSA) by allowing a partial ordering relation on the set of
sorts. This supports abstract data types with multiple inheritance (in
roughly the sense of object-oriented programming), several forms of
polymorphism and overloading, partial operations (as total on
equationally defined subsorts), exception handling, and an operational
semantics based on term rewriting. We give the basic algebraic
constructions for OSA, including quotient, image, product and term
algebra, and we prove their basic properties, including quotient,
homomorphism, and initiality theorems. The paper's major mathematical
results include a notion of OSA deduction, a completeness theorem for
it, and an OSA Birkhoff variety theorem. We also develop conditional
OSA, including initiality, completeness, and McKinsey-Malcev
quasivariety theorems, and we reduce OSA to (conditional) MSA, which
allows lifting many known MSA results to OSA. Retracts, which
intuitively are left inverses to subsort inclusions, provide
relatively inexpensive run-time error handling. We show that it is
safe to add retracts to any OSA signature, in the sense that it gives
rise to a conservative extension. A final section compares and
contrasts many different approaches to OSA. This paper also includes
several examples demonstrating the flexibility and applicability of
OSA, including some standard benchmarks like stack and list, as well
as a much more substantial example, the number hierarchy from the
naturals up to the quaternions.",
paper = "Gogu92.pdf"
}
\end{chunk}
\index{Cardelli, Luca}
\begin{chunk}{axiom.bib}
@inproceedings{Card86,
author = "Cardelli, Luca",
title = "Typechecking Dependent Types and Subtypes",
link =
"\url{http://lucacardelli.name/Papers/Dependent%20Typechecking.US.pdf}",
year = "1988",
journal = "LNCS",
volume = "306",
pages = "45-57",
paper = "Card86.pdf"
}
\end{chunk}
\index{Zariski, Oscar}
\index{Samuel, Pierre}
\begin{chunk}{axiom.bib}
@book{Zari75,
author = "Zariski, Oscar and Samuel, Pierre",
title = "Commutative Algebra",
series = "Graduate Texts in Mathematics",
year = "1975",
publisher = "Springer-Verlag",
isbn = "978-0387900896"
}
\end{chunk}
\index{Marcus, Daniel A.}
\begin{chunk}{axiom.bib}
@book{Marc77,
author = "Marcus, Daniel A.",
title = "Number Fields",
publisher = "Springer",
year = "1977",
isbn = "978-0387902791"
}
\end{chunk}
\index{Lang, Serge}
\begin{chunk}{axiom.bib}
@book{Lang05,
author = "Lang, Serge",
title = "Algebra",
publisher = "Springer",
year = "2005",
series = "Graduate Texts in Mathematics",
isbn = "978-0387953854"
}
\end{chunk}
\index{Fuh, You-Chin}
\index{Mishra, Prateek}
\begin{chunk}{axiom.bib}
@article{Fuhx89,
author = "Fuh, You-Chin and Mishra, Prateek",
title = "Polymorphic Subtype Inference -- Closing the Theory-Practice Gap",
journal = "Lecture Notes in Computer Science",
volume = "352",
year = "1989",
pages = "167-183",
paper = "Fuhx89.pdf"
}
\end{chunk}
\index{Kaes, Stefan}
\begin{chunk}{axiom.bib}
@article{Kaes92,
author = "Kaes, Stefan",
title = "Type Inference in the Presence of Overloading, Subtyping, and
Recursive Types",
journal = "LISP Pointers",
volume = "V",
number = "1",
pages = "193-204",
year = "1992",
paper = "Kaes92.pdf"
}
\end{chunk}
\index{Robinson, J. S. Derek}
\begin{chunk}{axiom.bib}
@book{Robi96,
author = "Robinson, J. S. Derek",
title = "A Course in the Theory of Groups",
year = "1996",
series = "Graduate Texts in Mathematics",
isbn = "978-1-4612-6443-9",
publisher = "Springer"
}
\end{chunk}
\index{Thatte, Satish R.}
\begin{chunk}{axiom.bib}
@article{That91,
author = "Thatte, Satish R.",
title = "Coercive Type Isomorphism",
journal = "LNCS",
volume = "523",
year = "1991",
pages = "29-49",
abstract =
"There is a variety of situations in programming in which it is useful
to think of two distinct types as representations of the same abstract
structure. However, language features which allow such relations to
be effectively expressed at an abstract level are lacking. We propose
a generalization of ML-style type inference to deal effectively with
this problem. Under the generalization, the (normally free) algebra
of type expressions is subjected to an equational theory generated by
a finite set of user-specified equations that express
interconvertibility relations between objects of ``equivalent'' types.
Each type equation is accompanied by a pair of conversion functions
that are (at least partial) inverses. We show that so long as the
equational theory satisfies a reasonably permissive syntactic
constraint, the resulting type system admits a complete type infer-
ence algorithm that produces unique principal types. The main
innovation required in type inference is the replacement of ordinary
free unification by unification in the user-specified equational
theory. The syntactic constraint ensures that the latter is unitary,
i.e., yields unique most general unifiers. The proposed constraint is
of independent interest as the first known syntactic
characterization for a class of unitary theories. Some of the
applications of the system are similar to those of Wadler's views
[Wad87]. However, our system is considerably more general, and more
orthogonal to the underlying language.",
paper = "That91.pdf"
}
\end{chunk}
\index{Bundgen, Reinhard}
\begin{chunk}{axiom.bib}
@book{Bund93,
author = "Bundgen, Reinhard",
title = "The ReDuX System Documentation",
year = "1993",
publisher = "WSI"
}
\end{chunk}
\index{Bundgen, Reinhard}
\begin{chunk}{axiom.bib}
@inproceedings{Bund93a,
author = "Bundgen, Reinhard",
title = {Reduce the Redex $\rightarrow$ ReDuX},
booktitle = "Proc. Rewriting Techniques and Applications 93",
year = "1993",
pages = "446-450",
publisher = "Springer-Verlag",
isbn = "3-540-56868-9"
}
\end{chunk}
\index{Cohn, P. M.}
\begin{chunk}{axiom.bib}
@book{Cohn91,
author = "Cohn, P. M.",
title = "Algebra",
publisher = "John Wiley and Sons",
year = "1991",
isbn = "0471101680",
paper = "Cohn91.pdf"
}
\end{chunk}
\index{Jouannaud, Jean-Pierre}
\index{Kirchner, Claude}
\begin{chunk}{axiom.bib}
@book{Joua90,
author = "Jouannaud, Jean-Pierre and Kirchner, Claude",
title = "Solving Equations in Abstract Algebras: A Rule-based Survey of
Unification",
year = "1990",
publisher = "Universite de Paris-Sud"
}
\end{chunk}
\index{Kowalsky, Hans Joachim}
\begin{chunk}{axiom.bib}
@book{Kowa63,
author = "Kowalsky, Hans Joachim",
title = "Linear Algebra",
year = "1963",
publisher = "Walter de Gruyter",
comment = "(German)"
}
\end{chunk}
\index{Reynolds, John C.}
\begin{chunk}{axiom.bib}
@inproceedings{Reyn80,
author = "Reynolds, John C.",
title = "Using Category Theory to Design Implicit Conversions and
Generic Operators",
booktitle = "Lecture Notes in Computer Science",
year = "1980",
abstract =
"A generalization of many-sorted algebras, called category-sorted
algebras, is defined and applied to the language-design problem of
avoiding anomalies in the interaction of implicit conversions and
generic operators. The definition of a simple imperative language
(without any binding mechanisms) is used as an example.",
paper = "Reyn80.pdf"
}
\end{chunk}
\index{Stansifer, R.}
\begin{chunk}{axiom.bib}
@inproceedings{Stan88,
author = "Stansifer, R.",
title = "Type Inference with Subtypes",
booktitle = "POPL 88",
pages = "88-97",
year = "1988",
abstract =
"We give an algorithm for type inference in a language with functions,
records, and variant records. A similar language was studied by
Cardelli who gave a type checking algorithm. This language is
interesting because it captures aspects of object-oriented programming
using subtype polymorphism. We give a type system for deriving types
of expressions in the language and prove the type inference algorithm
is sound, i.e., it returns a type derivable from the proof system. We
also prove that the type the algorithm finds is a ``principal'' type,
i.e., one which characterizes all others. The approach taken here is
due to Milner for universal polymorphism. The result is a synthesis of
subtype polymorphism and universal polymorphism.",
paper = "Stan88.pdf"
}
\end{chunk}
\index{Huet, Gerard}
\index{Oppen, Derek C.}
\begin{chunk}{axiom.bib}
@techreport{Huet80,
author = "Huet, Gerard and Oppen, Derek C.",
title = "Equations and Rewrite Rules: A Survey",
institution = "Stanford Verification Group",
number = "STAN-CS-80-785",
year = "1980",
abstract =
"Equations occur frequently in mathematics, logic and computer
science. in this paper, we survey the main results concerning
equations, and the methods available for reasoning about them and
computing with them. The survey is self-contained and unified, using
traditional abstract algebra.
Reasoning about equations may involve deciding if an equation follows
from a given set of equations (axioms), or if an equation is true in a
given theory. When used in this manner, equations state properties
that hold between objects. Equations may also be used as definitions;
this use is well known in computer science: programs written in
applicative languages, abstract interpreter definitions, and algebraic
data type definitions are clearly of this nature. When these
equations are regarded as oriented ``rewrite rules'', we may actually
use them to compute.
In addition to covering these topics, we discuss the problem of
``solving'' equations (the ``unification'' problem), the problem of
proving termination of sets of rewrite rules, and the decidability and
complexity of word problems and of combinations of equational
theories. We restrict ourselves to first-order equations, and do not
treat equations which define non-terminating computations or recent
work on rewrite rules applied to equational congruence classes.",
paper = "Huet80.pdf"
}
\end{chunk}
\index{Remy, Didier}
\begin{chunk}{axiom.bib}
@inproceedings{Remy89,
author = "Remy, Didier",
title = "Typechecking Records and Variants in a Natural Extension of ML",
booktitle = "POPL 89",
isbn = "978-0-89791-294-5",
publisher = "ACM",
link = "\url{https://www.cs.cmu.edu/~aldrich/courses/819/row.pdf}",
abstract =
"We describe an extension of ML with records where inheritance is
given by ML generic polymorphism. All common operations on records but
concatenation are supported, in particular, the free extension of
records. Other operations such as renaming of fields are added. The
solution relies on an extension of ML, where the language of types is
sorted and considered modulo equations, and on a record extension of
types. The solution is simple and modular and the type inference
algorithm is efficient in practice.",
paper = "Remy89.pdf"
}
\end{chunk}
\index{Wand, Mitchell}
\index{O'Keefe, Patrick}
\begin{chunk}{axiom.bib}
@inproceedings{Wand89,
author = "Wand, Mitchell and O'Keefe, Patrick",
title = "On the Complexity of Type Inference with Coercion",
booktitle = "PFCA 89",
pages = "293-298",
isbn = "0-89791-328-0",
abstract =
"We consider the following problem: Given a partial order $(C,\le)$ of
base types and coercions between them, a set of constants with types
generated from $C$, and a term $M$ in the lambda calculus with these
constants, does $M$ have a typing with this set of types? This
problem abstracts the problem of typability over a fixed set of base
types and coercions (e.g. int $\le$ real, or a fixed set of coercions
between opaque data types). We show that in general, the problem of
typability of lambda-terms over a given partially-ordered set of base
types is NP-complete. However, if the partial order is known to be a
tree, then the satisfiability problem is solvable in (low-order)
polynomial time. The latter result is of practical importance, as
trees correspond to the coercion structure of single-inheritance
object systems.",
paper = "Wand89.pdf"
}
\end{chunk}
\index{Lincoln, Patrick}
\index{Mitchell, John C.}
\begin{chunk}{axiom.bib}
@inproceedings{Linc92,
author = "Lincoln, Patrick and Mitchell, John C.",
title = "Algorithmic Aspects of Type Inference with Subtypes",
booktitle = "POPL 92",
pages = "293-304",
year = "1992",
abstract =
"We study the complexity of type inference for programming languages
with subtypes. There are three language variations that effect the
problem: (i) basic functions may have polymorphic or more limited
types, (ii) the subtype hierarchy may be fixed or vary as a result of
subtype declarations within a program, and (iii) the subtype hierarchy
may be an arbitrary partial order or may have a more restricted form,
such as a tree or lattice. The naive algorithm for infering a most
general polymorphic type, undervariable subtype hypotheses, requires
deterministic exponential time. If we fix the subtype ordering, this
upper bound grows to nondeterministic exponential time. We show that
it is NP-hard to decide whether a lambda term has a type with respect
to a fixed subtype hierarchy (involving only atomic type names). This
lower bound applies to monomorphic or polymorphic languages. We give
PSPACE upper bounds for deciding polymorphic typability if the subtype
hierarchy has a lattice structure or the subtype hierarchy varies
arbitrarily. We also give a polynomial time algorithm for the limited
case where there are no function constants and the type hierarchy
is either variable or any fixed lattice.",
paper = "Linc92.pdf"
}
\end{chunk}
\index{Davis, Martin D.}
\index{Sigal, Ron}
\index{Weyuker, Elaine J.}
\begin{chunk}{axiom.bib}
@book{Davi94,
author = "Davis, Martin D. and Sigal, Ron and Weyuker, Elaine J.",
title = "Computability, Complexity, and Languages: Fundamentals of
Theoretical Computer Science",
publisher = "Academic Press",
year = "1994",
isbn = "978-0122063824"
}
\end{chunk}
\index{Farmer, William M.}
\begin{chunk}{axiom.bib}
@article{Farm90,
author = "Farmer, William M.",
title = "A Partial Functions Version of Church's Simple Theory of Types",
journal = "The Journal of Symbolic Logic",
volume = "55",
number = "3",
year = "1990",
pages = "1269-1291",
abstract =
"Church's simple theory of types is a system of higher-order logic in
which functions are assumed to be total. We present in this paper a
version of Church's system called PF in which functions may be
partial. The semantics of PF, which is based on Henkin's
general-models semantics, allows terms to be nondenoting but requires
formulas to always denote a standard truth value. We prove that PF is
complete with respect to its semantics. The reasoning mechanism in PF
for partial functions corresponds closely to mathematical practice,
and the formulation of PF adheres tightly to the framework of
Church's system.",
paper = "Farm90.pdf"
}
\end{chunk}
\index{Odifreddi, Piergiorgio}
\begin{chunk}{axiom.bib}
@book{Odif92,
author = "Odifreddi, Piergiorgio",
title = "Classical Recursion Theory: The Theory of Functions and Sets of
Natural Numbers",
publisher = "Elsevier",
year = "1992"
}
\end{chunk}
\index{Buchberger, Bruno}
\index{Collins, George Edwin}
\index{Loos, Rudiger}
\begin{chunk}{axiom.bib}
@book{Buch82,
author = "Buchberger, Bruno and Collins, George Edwin and Loos, Rudiger",
title = "Computer Algebra: Symbolic and Algebraic Computation",
publisher = "Springer",
isbn = "978-3-211-81684-4",
year = "1982",
paper = "Buch82.pdf"
}
\end{chunk}
\index{Lauer, M.}
\begin{chunk}{axiom.bib}
@InCollection{Laue82,
author = "Lauer, M.",
title = "Computing by Homomorphic Images",
booktitle = "Computer Algebra: Symbolic and Algebraic Computation",
pages = "139-168",
year = "1982",
publisher = "Springer",
isbn = "978-3-211-81684-4",
abstract =
"After explaining the general technique of Computing by homomorphic
images, the Chinese remainder algorithm and the Hensel lifting
construction are treated extensively. Chinese remaindering is first
presented in an abstract setting. Then the specialization to Euclidean
domains, in particular $\mathbb{Z}$, $\mathbb{K}[y]$, and
$\mathbb{Z}[y_1,\ldots,y_n]$ is considered. For both techniques,
Chinese remaindering as well as the lifting algorithms, a complete
computational example is presented and the most frequent application
is discussed."
}
\end{chunk}
\index{Huet, Gerard}
\index{Plotkin, G.}
\begin{chunk}{axiom.bib}
@book{Huet91,
author = "Huet, Gerard and Plotkin, G.",
title = "Logical Frameworks",
publisher = "Cambridge University Press",
year = "1991"
}
\end{chunk}
\index{Harper, Robert}
\index{Honsell, Furio}
\index{Plotkin, Gordon}
\begin{chunk}{axiom.bib}
@article{Harp93,
author = "Harper, Robert and Honsell, Furio and Plotkin, Gordon",
title = "A Framework for Defining Logics",
journal = "J. ACM",
volume = "40",
number = "1",
year = "1993",
pages = "143-184",
abstract =
"The Edinburgh Logical Framework (LF) provides a means to define (or
present) logics. It is based on a general treatment of syntax, rules,
and proofs by means of a typed $\lambda$-calculus with dependent
types. Syntax is treated in a style similar to, but more general than,
Martin-L{\"o}f's system of arities. The treatment of rules and proofs
focuses on his notion of a judgment. Logics are represented in LF via
a new principle, the judgments as types principle, whereby each
judgment is identified with the type of its proofs. This allows for a
smooth treatment of discharge and variable occurrence conditions and
leads to a uniform treatment of rules and proofs whereby rules are
viewed as proofs of higher-order judgments and proof checking is
reduced to type checking. The practical benefit of our treatment of
formal systems is that logic-independent tools, such as proof editors
and proof checkers, can be constructed.",
paper = "Harp93.pdf"
}
\end{chunk}
\index{Pfenning, Frank}
\begin{chunk}{axiom.bib}
@inproceedings{Pfen89,
author = "Pfenning, Frank",
title = "Elf: A Language for Logic Definition and Verified Metaprogramming",
booktitle = "Logic in Computer Science 89",
year = "1989",
pages = "313-322",
abstract =
"We describe Elf, a metalanguage for proof manipulation environments
that are independent of any particular logical system. Elf is
intended for meta-programs such as theorem provers, proof
transformers, or type inference programs for programming languages
with complex type systems. Elf unifies logic definition (in the style
of LF, the Edinburgh Logical Framework) with logic programming (in the
style of $\lambda$-Prolog). It achieves this unification by giving
types an operational interpretation, much the same way that Prolog gives
certain formulas (Horn-clauses) an operational interpretation.
Novel features of Elf include: (1) the Elf search process
automatically constructs terms that can represent object-logic proofs,
and thus a program need not construct them explicitly, (2) the partial
correctness of meta-programs with respect to a given logic can be
expressed and proved in Elf itself, and (3) Elf exploits Elliott’s
unification algorithm for a $\lambda$-calculus with dependent types.",
paper = "Pfen89.pdf"
}
\end{chunk}
\index{Pfenning, Frank}
\begin{chunk}{axiom.bib}
@inproceedings{Pfen91,
author = "Pfenning, Frank",
title = "Logic Programming in the LF Logical Framework",
booktitle = "Proc. First Workshop on Logical Frameworks",
year = "1991",
paper = "Pfen91.pdf"
}
\end{chunk}
\index{Pfenning, Frank}
\begin{chunk}{axiom.bib}
@inproceedings{Pfen91a,
author = "Pfenning, Frank",
title = "Unification and Anti-Unification in the Calculus of Constructions",
booktitle = "Logic in Computer Science 91",
year = "1991",
pages = "74-85",
abstract =
"We present algorithms for unification and anti- unification in the
Calculus of Constructions, where occurrences of free variables (the
variables subject to instantiation) are restricted to higher-order
patterns, a notion investigated for the simply-typed $\lambda$-calculus
by Miller. Most general unifiers and least common anti-instances are
shown to exist and are unique up to a simple equivalence. The
unification algorithm is used for logic program execution and type and
term reconstruction in the current implementation of Elf and has
shown itself to be practical. The main application of the
anti-unification algorithm we have in mind is that of proof
generalization.",
paper = "Pfen91a.pdf"
}
\end{chunk}
---
books/bookheader.tex | 2 +
books/bookvol10.1.pamphlet | 5345 ++++++++++++++++++++++++++++++++++++++++
books/bookvolbib.pamphlet | 3048 +++++++++++++++++++++++-
books/bookvolbug.pamphlet | 103 +-
books/catmac.sty | 1013 ++++++++
books/dissdef.sty | 434 ++++
changelog | 7 +
patch | 3011 ++++++++++++++++++++++-
src/axiom-website/patches.html | 2 +
9 files changed, 12957 insertions(+), 8 deletions(-)
create mode 100644 books/catmac.sty
create mode 100644 books/dissdef.sty
diff --git a/books/bookheader.tex b/books/bookheader.tex
index 76f3032..64bcee5 100644
--- a/books/bookheader.tex
+++ b/books/bookheader.tex
@@ -9,6 +9,8 @@ citecolor=red}
\usepackage{enumitem} % for noitemsep in itemize
\usepackage{bussproofs} % for prooftree environment
\usepackage[margin=1in]{geometry} % for white pages
+\usepackage{catmac} % for Weber chapter in vol 10.1
+\usepackage{dissdef} % for Weber chapter in vol 10.1
\setlength{\textwidth}{400pt}
\makeindex
diff --git a/books/bookvol10.1.pamphlet b/books/bookvol10.1.pamphlet
index c18e49f..daf8a57 100644
--- a/books/bookvol10.1.pamphlet
+++ b/books/bookvol10.1.pamphlet
@@ -8636,6 +8636,5351 @@ If we zigzag properly, we can get Gauss' formula for interpolation:
\[y(u)=y_0+u\Delta{}y_0+\frac{u(u-1)}{2}\Delta^2y(-1)+
\frac{u(u^2-1)}{3!}\Delta^3y(-1)+\cdots\]
+\chapter[Type Systems]{Type Systems for Computer Algebra by Andreas Weber}
+
+This chapter is based on a PhD thesis by Andreas Weber\cite{Webe93b}.
+Changes have been made to integrate it.
+
+We study type systems for computer algebra systems, which frequently
+correspond to the ``pragmatically developed'' typing constructs used
+in {\sf Axiom}.
+
+A central concept is that of {\em type classes} which correspond to
+{\sf Axiom} categories. We will show that types can be syntactically
+described as terms of a regular order-sorted signature if no type
+parameters are allowed. Using results obtained for the functional
+programming language {\sf Haskell} we will show that the problem of
+{\em type inference} is decidable. This result still holds if
+higher-order functions are present and {\em parametric polymorphism}
+is used. These additional typing constructs are useful for further
+extensions of existing computer algebra systems: These typing concepts
+can be used to implement category theoretic constructs and there are
+many well known constructive interactions between category theory and
+algebra.
+
+On the one hand we will show that there are well known techniques to
+specify many important type classes algebraically, and we will also
+show that a formal and algorithmically feasible treatment of the
+interactions of algebraically specified data types and type classes is
+possible. On the other hand we will prove that there are quite
+elementary examples arising in computer algebra which need very
+``strong'' formalisms to be specified and are thus hard to handle
+algorithmically.
+
+We will show that it is necessary to distinguish between types and
+elements as parameters of parameterized type classes. The type
+inference problem for the former remains decidable whereas for the
+latter it becomes undecidable. We will also show that such a
+distinction can be made quite naturally.
+
+Type classes are second-order types. Although we will show that there
+are constructions used in mathematics which imply that type classes
+have to become first-order types in order to model the examples
+naturally, we will also argue that this does not seem to be the case
+in areas currently accessible for an algebra system. We will only
+sketch some systems that have been developed during the last years in
+which the concept of type classes as first-order types can be
+expressed. For some of these systems the type inference problem was
+proven to be undecidable.
+
+Another fundamental concept for a type system of a computer algebra
+system --- at least for the purpose of a user interface --- are {\em
+coercions}. We will show that there are cases which can be modeled by
+coercions but not by an ``inheritance mechanism'', i.\,e.\ the concept
+of coercions is not only orthogonal to the one of type classes but
+also to more general formalisms as are used in object-oriented
+languages. We will define certain classes of coercions and impose
+conditions on important classes of coercions which will imply that the
+meaning of an expression is independent of the particular coercions
+that are used in order to type it.
+
+
+We shall also impose some conditions on the interaction between
+polymorphic operations defined in type classes and coercions that will
+yield a unique meaning of an expression independent of the type which
+is assigned to it --- if coercions are present there will very
+frequently be several possibilities to assign types to expressions.
+
+Often it is not only possible to coerce one type into another but it
+will be the case that two types are actually {\em isomorphic}. We
+will show that isomorphic types have properties that cannot be deduced
+from the properties of coercions and will shortly discuss other
+possibilities to model type isomorphisms. There are natural examples
+of type isomorphisms occurring in the area of computer algebra that
+have a ``problematic'' behavior. So we will prove for a certain
+example that the type isomorphisms cannot be captured by a finite set
+of coercions by proving that the naturally associated equational
+theory is not finitely axiomatizable.
+
+Up to now few results are known that would give a clear dividing line
+between classes of coercions which have a decidable type inference
+problem and classes for which type inference becomes undecidable. We
+will give a type inference algorithm for some important classes of
+coercions.
+
+Other typing constructs which are again quite orthogonal to the
+previous ones are those of {\em partial functions} and of {\em types
+depending on elements}. We will link the treatment of {\em partial
+functions} in {\sf Axiom} to the one used in order-sorted algebras and
+will show some problems which arise if a seemingly more expressive
+solution were used. There are important cases in which {\em types
+depending on elements} arise naturally. We will show that not only
+type inference but even type checking is undecidable for relevant
+cases occurring in computer algebra.
+
+Types have played an extremely important role in the development and
+study of programming languages. They have become so prevalent that
+type theory is now recognized as an area of its own within computer
+science. The benefits which can be derived from the presence of types
+in a language are manifold. Through type checking many errors can be
+caught before a program is ever run, thus leading to more reliable
+programs. Types form also an expressive basis for module systems,
+since they prescribe a machine-verifiable interface for the code
+encapsulated within a module. Furthermore, they may be used to
+improve performance of code generated by a compiler.
+
+
+However, most computer algebra systems are based on untyped languages.
+Nevertheless, at least in the description and specification of many
+algorithms a terminology is used which can be seen as attributing
+``types'' to the computational objects. In {\sf Maple~V}
+\cite{Char91} and in {\sf Mathematica} \cite{Wolf91}, which
+are both based on untyped languages, it is even possible to attach
+``tags'' to data structures which describe types corresponding to the
+mathematical structures the data are supposed to represent.
+
+In the area of computer algebra, the problem of finding appropriate
+type systems which are supported by the language is that on the one
+hand, the type system has to consider the requirements of a computer
+system and on the other, it should allow for the mathematical
+structures a system is dealing with to have corresponding types.
+
+The development of {\sf Axiom} \cite{Jenk84b}, \cite{Suto87},
+\cite{Jenk92} is certainly a break-through since the language
+itself is typed with types corresponding to the mathematical
+structures the system deals with.
+
+However, the typing constructs used in {\sf Axiom} have been
+``pragmatically developed.'' Some are not even formally defined and
+only very few studies on formal properties of such a system have been
+undertaken. Even if other approaches to a type system in this area
+are considered --- such as the ``object-oriented'' one used for {\sf
+VIEWS} \cite{Abda86} --- we have found relatively few formal
+studies of type systems suited for the purpose of computer algebra
+systems in the literature, although a formal treatment of some typing
+constructs occurring in computer algebra was already given almost
+twenty years ago in \cite{Loos74}.
+
+So the situation is different from the one in other areas of computer
+science in which untyped languages are prevalent. For instance, most
+logic programming languages are untyped. This is a consequence of the
+fact that logic programming has its roots in first-order logic, which
+is essentially untyped. Nevertheless, the progress of type theory in
+the last decade has allowed the development of several type systems
+for logic programming languages. Moreover, the formal properties of
+these type systems have been studied extensively (see e.\,g.\
+\cite{Smol89a}, \cite{Frue91}, \cite{Kife91}, and the
+articles in the collection \cite{Pfen92}, in which also a
+comprehensive bibliography on the topic is given).
+
+We will not design a typed computer algebra language in this thesis in
+which the mathematical structures a program deals with have a
+correspondence in the type system. It does not seem possible to
+design and implement a language of similar power as {\sf Axiom} within
+a PhD-project. There are several proposals of languages for computer
+algebra systems\footnote{The author knows of Foderaro's {\sf NEWSPEAK}
+\cite{Fode83}, Coolsaet's {\sf MIKE} \cite{Cool92}, and
+Dalmas' {\sf XFun} \cite{Dalm92}.} which are designed and partly
+implemented as part of a PhD-project that incorporate some typing
+concepts, but which can be seen --- more or less --- as subsets of the
+typing constructs of {\sf Axiom}.
+
+Instead we will treat typing constructs which are similar in power to
+the ones of {\sf Axiom}. We will define type systems of various
+strength and will investigate their properties. Discussing a variety
+of examples we will show their relevance for a computer algebra
+system. We will also discuss some examples which are not implemented
+in a system as yet in order to give some estimates about the
+extendability of a system based on such typing principles. This is
+one of the shortcomings of many other investigations in which very
+often only examples that can be modeled are discussed. We hope that
+our discussion of a variety of examples will help to obtain
+characterization theorems of mathematical structures which can be
+modeled by certain typing constructs. This would be the best
+solution. However, it seems to be a large-scale task to obtain such
+characterization theorems in many cases. A problem in this connection
+is certainly that one has to define precisely a class of mathematical
+structures a program is dealing with at all. Current computer algebra
+programs sometimes deal with objects of universal algebra, sometimes
+with those of higher-order universal algebra, sometimes with those of
+first-order model theory, or sometimes with those of category theory,
+to mention only some possibilities.
+
+We will prove several properties of such type systems. A very
+important feature is the possibility of {\em type inference}. Given
+an expression the system should be able to infer a correct type for it
+whenever possible and reject it otherwise. Since the interpretation
+of an expression written in the standard mathematical notation
+requires a kind of type inference very frequently the possibility of
+type inference improves considerably the usefulness of a system for a
+user. Thus we will investigate the problems connected with type
+inference extensively and will also give some results on the
+computational complexity of various type inference problems. Another
+important problem we shall investigate in various, precisely defined
+ways is a possible ambiguity of a type system.
+
+Some of the results we give are contained in some form in the
+literature, especially in papers on type systems for functional
+languages. Nevertheless, it seems to have escaped prior notice that
+these results are applicable to the typing problems arising in
+computer algebra.
+
+On the one hand it is useful to have a system which can handle as many
+mathematical structures as possible. For many mathematicians a
+computer algebra system would be a very valuable tool if it allowed
+some computations in rather complicated mathematical structures.
+Since many of those computations would be fairly basic it would
+suffice for these users to have a system in which they could model
+those structures easily, even if that modeling was not very efficient.
+Among the existing systems {\sf Axiom} is one of the few which gives
+the possibility for such work.\footnote{The new version of
+{\sf Cayley} \cite{Butl90} allows similar possibilities but fewer
+structures have been implemented as yet.} So it seems to be necessary
+to have a safe foundation for the constructs found in such a universal
+system as {\sf Axiom}.
+
+On the other hand many computations that have to be performed reach
+the limits of existing computing power. So the algorithms should be
+as efficient as possible in order to be useful. Since it seems to be
+impossible to have a general system that is always as efficient as a
+more special one --- and this thesis will contain some results which
+can be viewed as a proof of this claim --- we will not only develop a
+framework for a general computer algebra system and discuss its
+properties but will also discuss the properties of some subsystems.
+The author hopes that some of these results will be useful for the
+design of symbolic manipulation systems or the design of user
+interfaces for such systems.
+
+The organization of the thesis will be as follows.
+
+In Sec.~\ref{chprelude} we will collect some definitions and facts
+which will be needed later. Most of the material in this chapter can
+be found scattered in the literature. Moreover, we will fix the
+notation and will give some discussion on the terminology used in this
+thesis as compared to the one found in the literature.
+
+A central concept is that of {\em type classes} which correspond to
+{\sf Axiom} categories and will be the subject of
+Sec.~\ref{chtycla}.\footnote{They are similar to the {\em varieties}
+of {\sf Cayley}, if a {\sf Cayley} {\em class} is interpreted as a
+type, which can be done using the concept of {\em types depending on
+elements} (see below). They are also similar to {\em container
+classes} used in object-oriented programming. However, we will not
+give a systematic treatment of constructs of object-oriented
+programming in this thesis.} We will show that types can be
+syntactically described as terms of a regular order-sorted signature
+if no type parameters are allowed. Using results obtained for the
+functional programming language {\sf Haskell} we will show that the
+problem of {\em type inference} is decidable. This result still holds
+if higher-order functions are present and {\em parametric
+polymorphism} is used. These additional typing constructs are useful
+for further extensions of existing computer algebra systems: These
+typing concepts can be used to implement category theoretic constructs
+and there are many well known constructive interactions between
+category theory and algebra.
+
+On the one hand we will show that there are well known techniques to
+specify many important type classes algebraically, and we will also
+show that a formal treatment of the interactions of algebraically
+specified data types and type classes is possible. On the other hand
+we will prove that there are quite elementary examples arising in
+computer algebra which need very ``strong'' formalisms to be
+specified.
+
+We will show that it is necessary to distinguish between types and
+elements as parameters of parameterized type classes. The type
+inference problem for the former remains decidable whereas for the
+latter it becomes undecidable. We will also show that such a
+distinction can be made quite naturally.
+
+Type classes are second-order types. Although we will show that there
+are constructions used in mathematics which imply that type classes
+have to become first-order types in order to model the examples
+naturally, we will also argue that this does not seem to be the case
+in areas currently accessible for an algebra system. We will only
+sketch some systems that have been developed during the last years in
+which the concept of type classes as first-order types can be
+expressed. For some of these systems the type inference problem was
+proven to be undecidable, thus showing one of the drawbacks of
+stronger formalisms.
+
+In Sec.~\ref{chapcoer} we will treat the concept of {\em coercions}
+which is another fundamental concept for a type system of a computer
+algebra system, at least for the purpose of a user interface. We will
+show that there are cases which can be modeled by coercions but not by
+an ``inheritance mechanism'', i.\,e.\ the concept of coercions is not
+only orthogonal to the one of type classes but also to formalisms
+extending type classes. We will define certain classes of coercions
+and impose conditions on important classes of coercions which will
+imply that the meaning of an expression is independent of the
+particular coercions that are used in order to type it. These results
+will also appear in \cite{Webe95}.
+
+We shall also impose some conditions on the interaction between
+polymorphic operations defined in type classes and coercions that will
+yield a unique meaning of an expression independent of the type which
+is assigned to it --- if coercions are present there will very
+frequently be several possibilities to assign types to expressions.
+
+Often it is not only possible to coerce one type into another but it
+will be the case that two types are actually {\em isomorphic}. We
+will show that isomorphic types have properties that cannot be deduced
+from the properties of coercions and will shortly discuss other
+possibilities to model type isomorphisms.
+
+Unfortunately, there are natural examples of type isomorphisms
+occurring in the area of computer algebra that have a ``problematic''
+behavior. For a major example of types having type isomorphisms that
+cannot be captured by a finite set of coercions, we will provide a
+proof that no such finite set can be given by proving that the
+naturally associated equational theory is not finitely axiomatizable.
+This example and the given proof are published by the author in
+\cite{Webe05}.
+
+We will give a semi-decision procedure for type inference for a system
+having type classes and coercions and a decision procedure for a
+subsystem which covers many important cases occurring in computer
+algebra. Up to now few results are known that would give a clear
+dividing line between classes of coercions which have a decidable type
+inference problem and classes for which type inference becomes
+undecidable. However, even in decidable cases the type inference
+problem in the presence of coercions is a hard problem. Even in cases
+in which the possible coercions are rather restricted the type
+inference problem was proven to be NP-hard for functional languages.
+
+Two typing constructs which are again quite orthogonal to the previous
+ones are treated in Sec.~\ref{chapothtyc}. We will link the treatment
+of {\em partial functions} in {\sf Axiom} to the one used in
+order-sorted algebras and will show some problems which arise if a
+seemingly more expressive solution were used. Nevertheless, some
+information is lost by the used solution and we sketch a proposal how
+the lost information could be regained in certain cases.
+
+There are important cases in which {\em types depending on elements}
+arise naturally. Unfortunately, not only type inference but even type
+checking are undecidable for relevant cases occurring in computer
+algebra, i.\,e.\ static type checking is not possible. On the one
+hand we will show that already types which have to be given to the
+objects in standard algorithms of almost any general purpose computer
+algebra program will prohibit static type checking. On the other hand
+it might be possible to restrict the types depending on elements
+available to a user of a high-level user interface to classes which
+have decidable type checking or even type inference problems. We will
+show that several formalisms have been developed during the last years
+which might be relevant in this respect.
+
+\section{Prelude}
+\label{chprelude}
+
+We will recall some definitions and facts which will be needed later.
+ All of this material can be found scattered in the literature.
+Moreover, we will fix the notation and will give some
+discussions of the terminology used in this thesis in
+comparison to the one found in the literature.
+
+\subsection{Terminology}
+
+\subsubsection{Abstract Data Types}
+
+
+The term {\em data type} has many informal usages in programming and
+programming methodology. For instance, Gries lists seven
+interpretations in \cite{Grie78}.
+
+In this thesis we will deal with different meanings of the term {\em
+abstract data type} (ADT). On the one hand there is the meaning used
+in the context of algebraic specifications as it is used e.\,g.\ in
+the survey of Wirsing \cite{Wirs91}. In this context an abstract
+datatype given by a specification is a class of certain many-sorted
+(or order-sorted) algebras which ``satisfy'' the specification.
+
+On the other hand there is the usage of this term for data types whose
+representation is hidden. For instance, in the report on the language
+{\sf Haskell} \cite{Huda92} the authors state ``the
+characteristic feature of an ADT is that the {\em representation type
+is hidden}; all operations on the ADT are done at an abstract level
+which does not depend on the representation''. The explanation given
+in the glossary of the book on {\sf Axiom} \cite{Jenk92} is
+quite similar:
+
+\begin{quote}
+{\bf abstract datatype} \\ a programming language principle used in
+{\sf Axiom} where a datatype definition is defined in two parts: (1)
+a {\em public} part describing a set of {\em exports}, principally
+operations that apply to objects of that type, and (2) a {\em private}
+part describing the implementation of the datatype usually in terms of
+a {\em representation} for objects of the type. Programs that create
+and otherwise manipulate objects of the type may only do so through
+its exports. The representation and other implementation information
+is specifically hidden.
+\end{quote}
+
+Usually the purpose of abstract data types in the sense of algebraic
+specifications is for the specification of abstract data types in the
+sense of the quotations given above. However, as we will show in this
+thesis, the abstract data types in the former sense can also be used
+for the specification of other classes of computational objects than
+abstract data types in the latter sense.
+
+\subsubsection{Polymorphism}
+
+Although the term {\em polymorphic function} is used in the
+literature, there are usually no definitions given.
+
+In the glossary of \cite{Jenk92} only examples of polymorphic
+functions are given but no definition. Also in the book by Aho,
+et~al.\ \cite[p.~364]{Ahox86}, the term is explained by
+giving examples of polymorphic functions.
+
+In the recent survey of Mitchell \cite{Mitc91a} the author states
+explicitly that he does not want to give a definition of {\em
+polymorphism}, but that he will only give definitions of some
+``polymorphic lambda-calculi''.
+
+There is a distinction between {\em parametric polymorphism} and {\em
+ad hoc polymorphism} which seems to go back to Strachey \cite{Stra00}
+(cited after \cite{Gogu89}):
+
+\begin{quotation}
+
+In {\em ad hoc} polymorphism there is no simple systematic way of
+determining the type of the result from the type of the
+arguments. There may be several rules of limited extent which reduce
+the number of cases, but these are themselves {\em ad hoc} both in
+scope and in content. All the ordinary arithmetic operations and
+functions come into this category. It seems, moreover, that the
+automatic insertion of transfer functions by the compiling system is
+limited to this class.
+
+Parametric polymorphism is more regular and may be illustrated by an
+example. Suppose f is a function whose arguments is of type $\alpha$
+and whose result is of type $\beta$ (so that the type of f might be
+written $\alpha \longrightarrow \beta$), and that L is a list whose
+elements are all of type $\alpha$ (so that the type of L is
+$\alpha{\bf list}$). We can imagine a function, say Map, which
+applies f in turn to each member of L and makes a list of the
+results. Thus Map[f,L] will produce a $\beta{\bf list}$. We would
+like Map to work on all types of list provided f was a suitable
+function, so that Map would have to be polymorphic. However its
+polymorphism is of a particularly simple parametric type which could
+be written $(\alpha \longrightarrow \beta, \alpha{\bf list})
+\longrightarrow \beta{\bf list}$, where $\alpha$ and $\beta$ stand for
+any types.
+
+\end{quotation}
+
+A widely accepted approach to parametric polymorphism is
+the Hindley-Milner type system \cite{Hind69},
+\cite{Miln78}, \cite{Dama82},
+which is used in Standard ML \cite{Miln90}, \cite{Miln91},
+Miranda \cite{Turn85}, \cite{Turn86} and other languages.
+
+We will use the term parametric polymorphism in this sense.
+
+There is no widely accepted approach to ad-hoc polymorphism. In its
+general form, we will use the word ad-hoc polymorphism and overloading
+quite synonymously indicating that no restriction is imposed on the
+possibility to overload an operator symbol.
+
+However, there is a third form of polymorphism which will play a
+central role in this thesis and for which an appropriate name is
+missing. It is the polymorphism which occurs when {\em categories} in
+the {\sf Axiom}-terminology resp.\ {\em type classes} in the {\sf
+Haskell}-terminology are used. In \cite{Wadl88} the nice
+negative formulation ``How to make {\em ad-hoc} polymorphism less {\em
+ad-hoc}'' is used but no proposal for a positive name is given. When
+necessary we will call the polymorphism encountered by type classes
+simply {\em type-class polymorphism}.\footnote{A term like {\em
+categorical polymorphism} seems to be misleading, especially since we
+prefer the word type class instead of category.}
+
+Sometimes a distinction is made between {\em polymorphic functions}
+and {\em generic function calls}. The intended meaning --- e.\,g.\ in
+\cite{Fode83} --- is that {\em polymorphic} refers to functions in
+which the same algorithm works on a wide range of data types, whereas
+{\em generic} refers to function declarations in the language which
+are resolved by different pieces of code.
+
+However, a clear distinction can only be made if there is an untyped
+language to which the typed language is reduced.\footnote{This is the
+case for typed-functional programming languages which are usually
+translated into the untyped lambda-calculus. It can also be put in a
+precise form that the lambda-calculus is untyped.} On the other hand
+if typing information is used by the run-time system it does not seem
+to be possible to have such a distinction. So in the book by Aho,
+et~al.\ \cite{Ahox86} no distinction is made between these terms.
+
+Nevertheless, we will sometimes use these terms with the flavor as is
+given in \cite{Fode83} when it will be clear how the language
+constructs in discussion can be translated into untyped ones.
+
+\subsubsection{Coercions}
+
+We will assume that we have a mechanism in the language to declare
+some functions between types to be {\em coercions}, i.\,e.\ conversion
+functions which are automatically inserted by the system if necessary.
+
+The usage of this terminology seems to be more or less standard, as
+the definition in the book by Aho, et~al.\ \cite[p.~359]{Ahox86} shows:
+
+\begin{quote}
+Conversion from one type to another is said to be {\it implicit} if it
+is to be done automatically by the compiler. Implicit type
+conversions, also called {\it coercions}, are limited in many
+languages to situations where no information is lost in principle;
+\end{quote}
+
+The definitions in the glossary of the book on {\sf Axiom}
+\cite{Jenk92} are quite similar:
+
+\begin{quote}
+{\bf coercion}\\ an automatic transformation of an object of one {\it
+type} to an object of a similar or desired target type. In the
+interpreter, coercions and {\it retractions} are done automatically by
+the interpreter when a type mismatch occurs. Compare {\bf conversion}.
+\end{quote}
+
+\begin{quote}
+{\bf conversion}\\ the transformation of an object of one {\it type}
+to one of another type. Conversions that can be performed
+automatically by the interpreter are called {\it coercions}. These
+happen when the interpreter encounters a type mismatch and a similar
+or declared target type is needed. In general, the user must use the
+infix operation ``::'' to cause this transformation.
+\end{quote}
+
+However, there are some issues which have to be clarified. In the
+following a {\em coercion\/} will always be a {\em total function}.
+Although we will see that it is desirable to have injective coercions
+(``no information is lost in principle'') we will not require that
+coercions are injective by the definition of the term.
+
+We will use the term {\em retraction} for non-total conversion
+functions. Our usage of this term is more general than the one in
+{\sf Axiom}:
+\begin{quote}
+{\bf retraction} \\ to move an object in a parameterized domain back
+to the underlying domain, for example to move the object 7 from a
+``fraction of integers'' (domain {\sf Fraction Integer}) to ``the
+integers'' (domain {\sf Integer}).
+\end{quote}
+
+In several papers --- e.\,g.\ \cite{Fuhx90}, \cite{Mitc91} --- the
+term {\em subtype} is used if there is a coercion from one type (the
+``subtype'') into another type (the ``supertype''). Since the term
+``subtype'' has several other meanings in the literature, we will
+avoid it. Only in our notation we will be close to that terminology
+and will write $t_1 \subtype t_2$ if there is a coercion $\phi: t_1
+\longrightarrow t_2$.
+
+\subsection{General Notation}
+
+As usual we will use {\em ``iff''} for {\em ``if, and only
+if''}.\index{iff|ii}
+
+The non-negative integers will be denoted by $\NN$. \index{
+N@$\NN$|ii} The integers will be denoted by $\ZZ$ and the rationals by
+$\QQ$. For $n \in \NN$ we will denote the integers modulo $n$ by
+$\ZZ_n$. We will use these symbols both for the algebras (of the
+usual signatures) and the underlying sets. Since we use these
+ambiguous notations only in parts exclusively written for human beings
+and not for machines, there will not be any problems. Nevertheless, a
+major part of this thesis will deal with problems which arise from
+ambiguities which mathematicians usually can resolve easily. We will
+show how some of them can be treated in a clean formal way accessible
+to machines, sometimes causing computationally hard problems.
+
+The set of strings over a set $L$ --- i.\,e.\ the set of finite
+sequences of elements of $L$ --- will be $L^*$,\index{ Lstar@$L^*$|ii}
+where $\varepsilon$ is the empty string. \index{
+epsilon@$\varepsilon$|ii}
+
+The length of a string $s \in L^*$ will be denoted by $|s|$. \index{
+"| "|@$"|\cdot"|$|ii} We will also denote the cardinality of a set $A$
+by $|A|$.\index{ "| "|@$"|\cdot"|$|ii}
+
+\subsection{Partial Orders and Quasi-Lattices}
+
+{\bf Definition 1. (Preorder)}.
+\index{preorder|ii}
+\index{order!preorder|ii}
+{\sl A binary relation which is reflexive and
+transitive is a {\em preorder}.}
+
+A preorder which is also antisymmetric is a partial order.
+\index{partial order|ii}
+\index{order!partial|ii}
+
+{\bf Definition 2.}
+{\sl Let $\langle M ,\leq \rangle$ be a partially ordered set.
+Then $c \in M$ is a {\em common lower bound\/} of $a$ and $b$
+($a,b \in M$) if $c \leq a$ and $c \leq b$.}
+
+{\sl Moreover, $c \in M$ is a {\em common upper bound\/} of $a$ and $b$
+if $a \leq c$ and $b \leq c$.}
+
+{\bf Definition 3.}
+{\sl Let $\langle M ,\leq \rangle$ be a partially ordered set.
+Then $c \in M$ is called the {\em infimum} of $a$ and $b$
+($a,b \in M$) if $c$ is a lower bound of $a$ and $b$ and}
+$$\forall d \in M : d \leq a \mbox{ and } d \leq b \Longrightarrow d \leq c.$$
+
+{\sl Furthermore, $c$ is called the {\em supremum} of
+$a$ and $b$ if it is an upper bound of $a$ and $b$ and}
+$$\forall d \in M : a \leq d \mbox{ and } b \leq d \Longrightarrow c \leq d.$$
+
+It is easy to verify that infima and suprema are unique if they exist.
+By induction the infimum and the supremum of any finite subset
+of a partially ordered set $\langle M ,\leq \rangle$ can be defined.
+
+{\bf Definition 4.}
+{\sl A partially ordered set $ \langle M ,\leq \rangle$ is
+a {\em lower quasi-lattice\/} if for any $a, b \in M$
+$a$ and $b$ have an infimum whenever they have a common lower bound.
+It is a {\em lower semi-lattice\/} if any $a,b \in M$ have an infimum.}
+
+{\sl A partially ordered set $ \langle M ,\leq \rangle$ is
+an {\em upper quasi-lattice\/} if for any $a, b \in M$
+$a$ and $b$ have a supremum whenever they have a common upper bound.
+It is an {\em upper semi-lattice\/} if any $a,b \in M$ have a supremum.}
+
+{\sl A partially ordered set $ \langle M ,\leq \rangle$ is
+a {\em quasi-lattice} if it is an upper and a lower quasi-lattice.
+It is a {\em lattice} if it is both an upper and a lower semi-lattice.}
+
+{\bf Definition 5. (Free Lower Semi-Lattices)}
+\label{freesemilat}
+Let $\langle M ,\leq \rangle$ be a
+partially ordered set.
+The {\em free lower semi-lattice on $\langle M ,\leq \rangle$}
+is the following partially ordered set $\langle F, \preceq \rangle$:
+\begin{enumerate}
+\item $F$ is the set of all non-empty subsets of $M$
+whose elements are pairwise incomparable with respect to $\leq$.
+\item If $S_1, S_2 \in F$ then
+ $$ S_1 \preceq S_2 \quad \Longleftrightarrow \quad
+ \forall s_2 \in S_2 \: \exists s_1 \in S_1 \, . \,
+ s_1 \leq s_2.$$
+\end{enumerate}
+
+{\bf Lemma 1. (Free Lower Semi-Lattices)}
+\label{lefrelosela}
+{\sl Let $\langle M ,\leq \rangle$ be a partially ordered set.
+Then the free lower semi-lattice on $\langle M ,\leq \rangle$
+is a lower semi-lattice.}
+
+\begin{proof}
+{\sl Let $S_1, S_2 \in F$ be arbitrary. Since $S_1 \in F$ and
+$S_2 \in F$ the chains in $S_1 \cup S_2$ with respect to $\leq$ have
+length at most 2. Let}
+$$H = \{ d \in S_1 \cup S_2 \mid \exists s \in S_1 \cup S_2 \, . \,
+s < d \}$$
+{\sl and}
+$$\overline{S}= (S_1 \cup S_2) - H.$$
+
+Since $\overline{S}$ is not empty and contains only incomparable
+elements by construction we have $\overline{S} \in F$.
+
+We claim that $\overline{S}$ is the infimum of $S_1$ and $S_2$.
+
+We have $\overline{S} \preceq S_1$ because for any $s \in S_1$
+either $s \in \overline{S}$ or there is an $s' \in
+\overline{S}$ such that $s' < s$. Similarly $\overline{S} \preceq S_2$.
+
+Let $L \in F$ be a common lower bound of $S_1$ and $S_2$
+with respect to $\preceq$, i.\,e.\ $L \preceq S_1$ and
+$L \preceq S_2$.
+Then for any $s \in S_1 \cup S_2$ there is an
+$l \in L$ such that $l \leq s$.
+Since $$\overline{S} \subseteq S_1 \cup S_2$$
+we thus have $L \preceq \overline{S}$ by the definition
+of $\preceq$.
+\qed
+\end{proof}
+
+\begin{remark}
+The statement given in \cite[p.~9]{Nipk91} that the union of
+two sets of incomparable elements is a set of incomparable elements
+and is the infimum of these sets with respect to the ordering given in
+Def. 5 is false in general. The proof of Lemma~1 shows the correct
+construction.
+\end{remark}
+
+\begin{remark}
+If we define semi-lattices algebraically (see e.\,g.\
+\cite[\S~6]{Grae79}),
+then the free lower semi-lattice on $\langle M ,\leq \rangle$
+is indeed a free semi-lattice.
+\end{remark}
+
+{\bf Lemma 2.}
+{\sl Let $\langle M ,\leq \rangle$ be a {\em finite}
+partially ordered set. Then $\langle M ,\leq \rangle$
+is a lower quasi-lattice iff it is an upper quasi-lattice.}
+
+\begin{proof}
+Let $ \langle M ,\leq \rangle$ be a lower quasi-lattice
+in which $a$ and $b$ have a common upper bound. We have
+to show that $a$ and $b$ have a supremum. Since the set
+$$I= \{c \in M : a \leq c \mbox{ and } b \leq c \}$$
+is nonempty and finite and $\langle M ,\leq \rangle$ is a lower
+quasi-lattice the infimum $c$ of $I$ exists. Clearly $c$ is the supremum
+of $a$ and $b$.
+
+The other direction is shown analogously. \qed
+\end{proof}
+
+{\bf Lemma 3.}
+\label{lenolat}
+{\sl Let $ \langle M ,\leq \rangle$ be a finite partially ordered set. Then
+$ \langle M ,\leq \rangle$ is not a quasi-lattice
+iff there are $a,b,c,d \in M$ such that
+\begin{enumerate}
+\item $a \leq c$ and $a \leq d$,
+\item $b \leq c$ and $b \leq d$,
+\item $a \not \leq b$ and $b \not \leq a$,
+\item $c \not \leq d$ and $d \not \leq c$,
+\item \label{lacolenolat} there is no $e \in M$ which is a common upper bound
+of $a$ and $b$ and a common lower bound of $c$ and $d$.
+\end{enumerate}}
+
+\begin{figure}
+
+\begin{center}
+\unitlength=1mm
+\thinlines
+\begin{picture}(45.00,50.00)
+\put(13.00,10.00){\makebox(0,0)[cc]{$a$}}
+\put(37.00,10.00){\makebox(0,0)[cc]{$b$}}
+\put(13.00,40.00){\makebox(0,0)[cc]{$c$}}
+\put(37.00,40.00){\makebox(0,0)[cc]{$d$}}
+\put(13.00,13.00){\vector(0,1){24.00}}
+\put(13.00,13.00){\vector(1,1){24.00}}
+\put(37.00,13.00){\vector(0,1){24.00}}
+\put(37.00,13.00){\vector(-1,1){24.00}}
+\put(9.00,25.00){\makebox(0,0)[cc]{$\leq$}}
+\put(41.00,25.00){\makebox(0,0)[cc]{$\leq$}}
+\put(22.00,20.00){\makebox(0,0)[cc]{$\leq$}}
+\put(28.00,20.00){\makebox(0,0)[cc]{$\leq$}}
+\end{picture}
+\end{center}
+
+\caption{Ad Lemma 3}
+\end{figure}
+
+\begin{proof}
+Assume $\langle M, \leq \rangle$ is a finite partially ordered set having
+elements $a,b,c,d \in M$ that satisfy the conditions of the lemma.
+Since $a$ and $b$ have a common upper bound, we are done
+if we can show that
+they do not have a supremum. Assume towards a contradiction they had
+a supremum $e$. Since $c$ and $d$ are common upper bounds
+of $a$ and $b$ and $e$ is the supremum of $a$ and $b$,
+we would have
+$e \leq c$ and $e \leq d$, a contradiction to condition~\ref{lacolenolat}
+of the lemma.
+
+Now let $\langle M, \leq \rangle$ be a finite partially ordered set
+which is not a quasi-lattice.
+Then there are $a,b \in M$ which have a common upper bound $c$
+but not a supremum. Since $M$ is finite we can assume w.\,l.\,o.\,g.\
+that there is no $c' < c$ which is also a common upper bound
+of $a$ and $b$ (if there is one, take $c'$ instead of $c$).
+Since $a$ and $b$ have no supremum, there is a common upper bound
+$d$ of $a$ and $b$ such that $d \not \leq c$ and $c \not \leq d$.
+These elements $a, b, c, d$ satisfy the conditions of the lemma.\qed
+\end{proof}
+
+\subsection{Order-Sorted Algebras}
+
+There is a growing literature on order-sorted algebras. Some
+comprehensive sources are the thesis of Schmidt-Schau{\ss}
+\cite{Schm89}, the survey by Smolka, et~al.\ \cite{Smol89}, and the
+articles by Goguen \& Meseguer \cite{Gogu89} and by Waldmann
+\cite{Wald92}. In \cite{Como90} Comon shows that an order-sorted
+signature can be viewed as a finite bottom-up tree automaton.
+
+{\bf Definition 6. (Order-Sorted Signature)}
+{\sl An {\em order-sorted signature}
+\index{order-sorted signature|ii}
+\index{signature!order-sorted|ii}
+is a triple
+$(S, \leq, \Sigma)$, where $S$
+is a set of sorts, $\leq$ a preorder on \index{preorder}
+$S$, and $\Sigma$ a family}
+$$\{ \Sigma_{\omega,\sigma} \mid \omega \in S^*, \: \sigma \in S \}$$
+{\sl of not necessarily disjoint sets of operator symbols.}
+
+{\sl If $S$ and $\Sigma$ are finite, the signature is called finite.}
+
+{\sl For notational convenience, we often write $f: (\omega)\sigma $
+instead of $f \in \Sigma_{\omega,\sigma}$; $(\omega)\sigma$ is called
+an {\em arity} \index{arity|ii} and $f: (\omega)\sigma$ a {\em
+declaration}. \index{declaration|ii} The signature $(S, \leq, \Sigma)$
+is often identified with $\Sigma$. If $|\omega|=n$ then $f$ is called
+an $n$-ary operator symbol. \index{operator symbol|ii} $0$-ary
+operator symbols are {\em constant symbols}. \index{constant
+symbols|ii} As in \cite{SmolkaNutt89} we will assume in the following
+that for any $f$ there is only a single $n \in \NN$ such that $f$ is an
+$n$-ary operator symbol.}
+
+{\sl A $\sigma$-sorted variable set is a family}
+$$V= \{V_\sigma \mid \sigma \in S\}$$
+{\sl of disjoint, nonempty sets. For $x \in V_\sigma$ we also
+write $x:\sigma$ or $x_\sigma$.}
+\index{variable set!s-sorted@$\sigma$-sorted|ii}
+
+In \cite{Gogu89} the following monotonicity condition
+must be fulfilled by any order-sorted signature.
+
+{\bf Definition 7.}
+{\sl An order-sorted signature $(S, \leq, \Sigma)$ fulfills the
+{\em monotonicity condition},
+\index{monotonicity condition|ii}
+if}
+$$f \in \Sigma_{\omega_1,\sigma_1} \cap \Sigma_{\omega_2,\sigma_2}
+\mbox{ and } \omega_1 \leq \omega_2 \mbox{ imply } \sigma_1 \leq \sigma_2.$$
+
+Notice that the monotonicity condition excludes multiple declarations
+of constants. This is one of the reasons why we will not assume in
+general that the order-sorted signatures we will deal with will
+fulfill the monotonicity condition.
+
+{\bf Definition 8. (Order-Sorted Terms)}
+\label{defordsortterm}
+{\sl The set of {\em order-sorted terms}
+\index{order-sorted terms!set of|ii}
+of sort $\sigma$ freely generated by $V$,
+$T_\Sigma(V)_\sigma$, is the least set satisfying}
+\begin{itemize}
+\item if $x \in V_{\sigma'}$ and $\sigma' \leq \sigma$,
+then $x \in T_\Sigma(V)_\sigma$
+\item if $f \in \Sigma_{\omega,\sigma'}$, $\omega = \sigma_1 \ldots \sigma_n$,
+ $\sigma' \leq \sigma$ and $t_i \in T_\Sigma(V)_{\sigma_i}$ then
+$f(t_1, \ldots, t_n) \in T_\Sigma(V)_\sigma$.
+\end{itemize}
+
+If $t \in T_\Sigma(V)_\sigma$ we will also write $t:\sigma$.
+
+In contrast to sort-free terms and variables, order-sorted variables
+and terms always have a sort. Terms must be sort-correct, that is,
+subterms of a compound term must be of an appropriate sort as required
+by the arities of the term's operator symbol.
+
+Note that an operator symbol may have not just one arity (as in
+classical homogeneous or heterogeneous term algebras), but may have
+{\em several} arities. As a consequence, each term may have several
+sorts.
+
+The set of all order-sorted terms over $\Sigma$
+freely generated by $V$ will be denoted by
+$$T_\Sigma(V) := \bigcup_{\sigma \in S} T_\Sigma(V)_\sigma .$$
+The set of all
+{\em ground terms} over $\Sigma$ is
+$T_\Sigma := T_\Sigma(\{\})$.
+\index{ground term|ii}\index{term!ground|ii}
+
+{\bf Definition 9. (Regularity)}
+{\sl A signature is {\em regular},
+\index{signature!order-sorted!regular|see{signature!regular}}
+\index{signature!regular|ii}
+if the subsort preorder
+of $\Sigma$ is antisymmetric, and if each term
+$t \in T_\Sigma(V)$ has a least sort.}
+
+The following theorem shows that it is decidable for finite signatures
+whose subsort preorders are anti-symmetric if a signature is regular.
+
+{\bf Theorem 1.}
+\label{thregulcon}
+{\sl A signature $(S, \leq, \Sigma)$ whose subsort preorder
+is anti-symmetric is regular iff for
+every $f \in \Sigma$ and $\omega \in S^*$ the set}
+$$\{\sigma \mid \exists \omega' \geq \omega :
+ f\in \Sigma_{\omega',\sigma} \}$$
+{\sl either is empty or contains a least element.}
+
+\begin{proof}
+See \cite{Smol89}. \qed
+\end{proof}
+
+As an example of a simple non-regular signature, consider
+$$(\{ \sigma_0, \sigma_1, \sigma_2 \}, \;
+ \{ \sigma_1 \leq \sigma_0, \, \sigma_2 \leq \sigma_0\}, \;
+\Sigma_{\varepsilon,\sigma_1}=\{a\}, \Sigma_{\varepsilon,\sigma_2}=\{a\}).$$
+The constant $a$ has two sorts which are incomparable, hence it does
+not have a least sort.
+
+{\bf Definition 10.}
+{\sl The {\em complexity\/}
+\index{complexity of a term|ii}
+\index{term!complexity|ii}
+ of a term $t\in T_\Sigma(V)$, $\com(t)$ is inductively
+defined as follows:
+\begin{itemize}
+\item $\com(t)=1$, if $t \in V_\sigma$ or $t \in \Sigma_{\varepsilon,\sigma}$
+for some $\sigma \in S$,
+\item if $f \in \Sigma_{\omega,\sigma'}$, $\omega = \sigma_1 \cdots \sigma_n$,
+and $t_i \in T_\Sigma(V)_{\sigma_i}$ then
+$$\com(f(t_1,\ldots,t_n))= \max(\com(t_1), \ldots, \com(t_n)) + 1.$$
+\end{itemize}}
+
+{\bf Definition 11.}
+{\sl A {\em substitution} $\theta$ \index{substitution|ii}
+from a variable set $Y$ into the term algebra $T_\Sigma(V)$ is a mapping from
+$Y$ to $T_\Sigma(V)$, which additionally satisfies
+$\theta(x) \in T_\Sigma(V)_\sigma$ if $x \in V_\sigma$
+(that is, substitutions must be sort-correct). As usual,
+substitutions are extended canonically to
+$T_\Sigma(V)$.
+If $Y=\{x_1, \ldots, x_n\}$ we write
+$\theta=\{x_1 \mapsto t_1, \ldots, x_n \mapsto t_n\}$.
+If $\theta=\{x_1 \mapsto t_1\}$ and
+$t \in T_\Sigma(V)$, then we will write $t[t_1/x_1]$ for
+$\theta(t)$.
+If, for $t, t' \in T_\Sigma(V)$, there is a substitution
+$\theta$ such that
+$t'=\theta(t)$, then $t'$ is called
+an {\em instance} of $t$.\index{instance|ii}
+Similarly, a substitution $\theta'$ is called an
+instance of a substitution
+$\theta$ with respect to a set of variables $W$, written
+$\theta \succeq \theta'[W]$, if there is a substitution
+$\gamma$ such that
+$\theta'(x)=\gamma(\theta(x))$ for all
+$x \in W$.}
+
+{\bf Definition 12.}
+{\sl A {\em unifier}\index{unifier|ii} of a set of equations
+$\Gamma$ is a substitution $\theta$ such that
+$\theta(s)=\theta(t)$ for all equations
+$s=^{\rm ?} t$ in $\Gamma$.
+A set of unifiers $U$ of $\Gamma$ is called {\em complete}
+\index{complete set of unifiers|ii}\index{unifier!complete set|ii}
+(and denoted by CSU),\index{CSU|ii} if for every unifier
+$\theta'$ of $\Gamma$ there exists $\theta \in U$ such that
+$\theta'$ is an instance of
+$\theta$ with respect to the variables in
+$\Gamma$. As usual, a signature is called
+{\em unitary (unifying)},
+\index{unitary unifying|ii}\index{signature!unitary unifying|ii}
+\index{unifying!unitary|ii}
+if for all equation sets $\Gamma$ there is a complete set of
+unifiers containing at most one element;
+it is called {\em finitary (unifying)},
+\index{finitary unifying|ii}\index{signature!finitary unifying|ii}
+\index{unifying!finitary|ii}
+if there is always a finite and complete set of unifiers.}
+
+For non-regular signatures, unifications can be infinitary,
+even if the signature is finite (see e.\,g.\
+\cite[p.~326]{Smol89}, \cite[p.~26]{Wald92}).
+
+{\bf Theorem 2. (Schmidt-Schau{\ss})}
+{\sl In finite and regular signatures, finite sets of equations have
+finite, complete, and effectively computable sets of unifiers.}
+
+\begin{proof}
+See \cite[Theorem~15]{Smol89}.\qed
+\end{proof}
+
+{\bf Definition 13.}
+{\sl A signature $(S, \leq, \Sigma)$ is {\em downward complete}
+\index{downward complete|ii}
+\index{signature!downward complete|ii}
+if any two sorts have either no lower bound or an infimum,
+and {\em coregular}
+\index{coregular|ii}
+\index{signature!coregular|ii}
+if for any operator symbol $f$ and any sort $\sigma \in S$
+the set
+$$D(f,\sigma)=\{ \omega \mid \exists \sigma' \in S \:.\;
+f: (\omega)\sigma' \wedge \sigma' \leq \sigma\}$$
+either is empty or has a greatest element.}
+
+{\bf Definition 14.}
+{\sl Let $(S, \leq, \Sigma)$ be an order-sorted signature.
+It is {\em injective} if for any operator symbol $f$
+the following condition holds:
+$$f: (\omega) \sigma \mbox{ and } f: (\omega') \sigma
+\quad \mbox{ imply }\quad \omega = \omega'.$$
+
+It is {\em subsort reflecting} if for any operator symbol $f$
+the following condition holds:}
+$$f: (\omega') \sigma' \mbox{ and } \sigma' \leq \sigma
+\quad \mbox{ imply } \quad f: (\omega) \sigma \mbox{ for some }
+\omega \geq \omega'.$$
+
+{\bf Theorem 3.}
+{\sl Every finite, regular, coregular, and downward complete signature is
+unitary unifying.}
+
+\begin{proof}
+See \cite[Theorem~17]{Smol89}.\qed
+\end{proof}
+
+{\bf Corollary 3A.}
+{\sl Every finite, regular, downward complete, injective and subsort
+reflecting signature is unitary unifying.}
+
+\subsection{Category Theory}
+
+We will recall some basic definitions from category theory which can
+be found in many books on the topic. Some classical textbooks are
+\cite{Macl91}, \cite{Schu72}. A more recent
+textbook is \cite{Frey90}. In \cite{Ryde88}
+computational aspects are elaborated. The basic concepts of category
+theory can also be found in several books which use category theory as
+a tool for computer science, e.\,g.\ in \cite{Ehri85}.
+
+{\bf Definition 15. (Category)}
+\index{category|ii}
+{\sl A {\em category} $\cat{C}$ consists of a class of
+{\em objects\/} \index{objects|ii}
+$\obj{\cat{C}}$, for each pair
+$(A,B) \in \obj{\cat{C}} \times \obj{\cat{C}}$ a set
+$\mor{\cat{C}}{A,B}$ of {\em morphisms} (or {\em arrows}), written
+$f: A \longrightarrow B$ for
+$f \in \mor{\cat{C}}{A,B}$,
+and a {\em composition}}
+\index{composition|ii}
+\index{ composition@$\circ$|ii}
+\index{arrows!composition of|ii}
+$$\begin{array}{l}
+\circ: \mor{C}{A,B} \times \mor{C}{B,C} \longrightarrow \mor{C}{A,C}\\
+(f: A \longrightarrow B, \; g: B \longrightarrow C)
+\mapsto (g \circ f : A \longrightarrow C)
+\end{array}
+\index{arrows|ii} \index{morphisms|ii} \index{objects|ii}
+$$
+{\sl (more precisely a family of functions
+$\circ_{A,B,C}$ for all objects $A,B,C$) such that the following
+axioms are satisfied:
+\begin{enumerate}
+\item $(h \circ g ) \circ f = h \circ (g \circ f)$
+\hfill {\em (associativity)} \\
+for all morphisms $f,g,h$, if at least one side is defined.
+\item For each object $A \in \obj{C}$ there is a morphism
+${\rm id}_A \in \mor{C}{A,A}$, called
+{\em identity of $A$}, such that we have for all
+$f: A \longrightarrow B$ and
+$g: C \longrightarrow A$ with $B,C \in \obj{C}$\\
+$f \circ {\rm id}_A = f \mbox{ and } {\rm id}_A \circ g =g$
+\hfill {\em (identity)}.
+\index{identity|ii}\index{category!identity|ii}
+\index{ id@${\rm id}_A$|ii}
+\end{enumerate}}
+
+{\sl Frequently we will write}
+$$A \stackrel{f}{\longrightarrow} B$$
+{\sl instead of}
+$$f: A \longrightarrow B .$$
+
+{\bf Definition 16. (Opposite Category)}
+\index{dual category|ii}\index{category!opposite|ii}
+\index{opposite category|ii}\index{category!dual|ii}
+{\sl Let $\cat{C}$ be a category. Then
+$\cat{C}^{\rm op}$, the {\em opposite category} of
+$\cat{C}$, is the category which is
+defined by
+\begin{enumerate}
+\item $\obj{C^{\rm op}}= \obj{C}$,
+\item $\mor{C^{\rm op}}{A,B} =\mor{C}{B,A}$.
+\end{enumerate}
+Sometimes we will call $\cat{C}^{\rm op}$ the
+{\em dual category} of $\cat{C}$.}
+
+Clearly, $\cat{(C^{\rm op})^{\rm op}}= \cat{C}$.
+
+For any categories $\cat{C}$ and $\cat{D}$
+we will write
+$\cat{C} \times \cat{D}$\index{ product@$\times$|ii}
+for the category which is defined by
+\begin{enumerate}
+\item $\obj{C \times D}= \obj{C} \times \obj{D}$,
+\item $\mor{C\times D}{(A,A'),\,(B,B')} =\mor{C}{A,B} \times
+\mor{D}{A',B'}$,
+\end{enumerate}
+where the symbol $\times$ on the right hand side of the equations
+denotes the usual set theoretic Cartesian product.
+
+Since $\times$ is associative, we will write
+unambiguously
+$\cat{C}_1 \times \cdots \times \cat{C}_n$
+for an $n$-fold iteration.
+Moreover,
+$$\cat{C}^n= \underbrace{\cat{C} \times \cdots \times \cat{C}}_{n}.$$
+
+{\bf Definition 17. (Functors)}
+\index{functor|ii}
+{\sl Let $\cat{C}$ and $\cat{D}$ be categories.
+A mapping $F: \cat{C} \longrightarrow \cat{D}$ is
+called {\em functor}, if
+$F$ assigns to each object $A$ in $\cat{C}$
+an object $F(A)$ in $\cat{D}$ and to each morphism
+$f: A \longrightarrow B$ in $\cat{C}$ a morphism
+$F(f): F(A) \longrightarrow F(B)$ in
+$\cat{D}$ such that the following axioms are satisfied:
+\begin{enumerate}
+\item $F(g \circ f) = F(g) \circ F(f)$ for all $g \circ f$ in
+$\cat{C}$,
+\item $F({\rm id}_A) = {\rm id}_{F(A)}$ for all objects $A$
+in $\cat{C}$.
+\end{enumerate}}
+
+{\sl The composition of two functors
+$F: \cat{C} \longrightarrow \cat{D}$ and
+$G: \cat{D} \longrightarrow \cat{E}$
+is defined by }
+$$G \circ F (A) = G(F(A))$$
+{\sl and}
+$$G \circ F (f) = G(F(f))$$
+{\sl for objects and morphisms respectively
+leading to the {\em composite functor}
+$G \circ F : \cat{C} \longrightarrow \cat{E}$.}
+
+{\sl The {\em identical functor}
+${\rm ID}_{\cat{C}}: \cat{C} \longrightarrow \cat{C}$ is
+defined by
+${\rm ID}_{\cat{C}}(A) = A$ and
+${\rm ID}_{\cat{C}}(f) = f$.\index{ ID@${\rm ID}_{\cat{C}}$|ii}}
+
+{\sl A functor $F: \cat{C} \longrightarrow \cat{D}$ is
+also called a {\em covariant functor}
+from $\cat{C}$ into $\cat{D}$.\index{covariant!functor|ii}
+\index{functor!covariant|ii}
+A functor $F: \cat{C}^{\rm op} \longrightarrow \cat{D}$ is
+ called a {\em contravariant functor} from
+$\cat{C}$ into $\cat{D}$.}
+\index{contravariant!functor|ii}
+\index{functor!contravariant|ii}
+
+{\bf Definition 18. (Natural Transformations)}
+\index{natural transformation|ii}
+\index{transformation!natural|ii}
+{\sl Let $S, T :\cat{C} \longrightarrow \cat{D}$
+be functors.
+A {\em natural transformation} $\tau: S \longrightarrow T$
+is a mapping which assigns to any
+object $A$ in $\cat{C}$ an arrow
+$\tau_{A}=\tau (A) : S(A) \longrightarrow T(A)$
+such that for any arrow
+$f: A \longrightarrow B$ in $\cat{C}$ the diagram}
+\begin{center}
+\square[S(A)`T(A)`S(B)`T(B);\tau(A)`S(f)`T(f)`\tau(B)]
+\end{center}
+{\sl is commutative.}
+
+{\bf Definition 19. (Initial Objects)}
+{\sl Let $\cat{C}$ be a category.
+An object $I \in \obj{C}$ is {\em initial in $\cat{C}$}
+if for any object $A \in \cat{C}$ there is
+a unique morphism $f \in \mor{C}{I,A}$.
+If the category $\cat{C}$ is clear from the
+context, then it is simply said that
+$I$ is an {\em initial object}.}
+
+If an initial object exists in a category, it is uniquely determined
+up to isomorphism.
+
+\subsection{The Type System of Axiom}
+
+The type system of {\sf Axiom} consists of three levels:
+
+\begin{enumerate}
+\item elements,
+\item domains,
+\item categories.
+\end{enumerate}
+
+The elements belong to domains, which correspond to types in the
+traditional programming terminology.
+
+Domains are built by {\em domain constructors}, which are functions
+having the following sort of parameters: elements, or domains of a
+certain category. Any domain constructor has a {\em category
+assertion} part which asserts that for any possible parameters of the
+domain constructor the constructed domain belongs to the categories
+given in it.
+
+\subsubsection{Categories}
+
+Also categories are built by category constructors which are functions
+having elements or domains as parameters.
+
+An important subclass is built by the categories which are built by
+category constructors having no parameters. They are called the {\em
+basic algebra hierarchy} in \cite{Jenk92} and consist up to now of 46
+categories.
+
+As is stated in \cite[p.~524]{Jenk92} the case of elements as
+parameters of category constructors is rare.
+
+In the definition of a category there is always a part in which the
+categories are given which the defined category extends.\footnote{The
+category which is extended by all other categories and which does not
+extend any other category is predefined and is called {\tt Type}.}
+
+An important component of the definition of a category is the
+documentation. There is even a special syntax for comments serving as
+a documentation in contrast to other kinds of comments. The
+importance of having a language support for the documentation as well
+as for the implementation of an algorithm is also clearly elaborated
+in the design of the algorithm description language {\sf ALDES}
+\cite{Loos72}, \cite{Loos76}, \cite{Loos92}, and in the
+implementation of the SAC-2 library (see e.\,g.\ \cite{Coll90},
+\cite{Buen91}).
+
+The {\em axioms} which a member of a category has to fulfill are
+stated in the comment only and there is no mechanism for an automated
+verification provided yet. There is a mechanism to declare some
+equationally definable axioms as so called {\em attributes} which can
+be used explicitly in the language. However, the attributes can be
+used only directly. A theorem proving component is not included in
+the system.
+
+Some operations in a category definition can have {\em default}
+declarations, i.\,e.\ algorithms for algorithmically definable
+operations can be given. These default declarations can be overwritten
+by special algorithms in any instance of a category.
+
+
+There are two syntactic declarations which reduce the number of
+category declarations which have to be given considerably.
+
+Using the keyword {\tt Join} a category is defined which has all
+operations and properties of the categories given as arguments to {\tt
+Join}.
+
+Instead of defining different categories ${\cal C}_1$ and ${\cal C}_2$
+and to declare that ${\cal C}_2$ extends ${\cal C}_1$ it is possible
+to define ${\cal C}_1$ and to use the so called {\em conditional
+phrase} {\tt has} in the definition of ${\cal C}_1$ to give the
+additional properties of ${\cal C}_2$.
+
+\begin{figure}
+\rule{\textwidth}{0.1pt}
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+++ the class of all multiplicative semigroups
+++ Axioms
+++ . associative("*":($,$)->$) ++ (x*y)*z = x*(y*z)
+++ Common Additional Axioms
+++ . commutative("*":($,$)->$) ++ x*y = y*x
+SemiGroup(): Category == SetCategory with
+ --operations
+ "*": ($,$) -> $
+ "**": ($,PositiveInteger) -> $
+
+ add
+ import RepeatedSquaring($)
+ x:$ ** n:PositiveInteger == expt(x,n)
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+\rule{\textwidth}{0.1pt}
+
+\caption{An example of a category definition in {\sf Axiom}}
+\label{figaxsegcat}
+\end{figure}
+
+\subsubsection{Coercions}
+
+In {\sf Axiom} it is possible to have coercions between domains.
+Syntactically, an overloaded operator symbol {\tt coerce} is used for
+the definition of the coercion functions. There seems to be no
+restriction on the functions which can be coercions. So also partial
+functions can be coercions in {\sf Axiom}, contrary to the usage of
+the term in this chapter.
+
+\section{Type Classes}
+\label{chtycla}
+
+In the main part of this section we will deal with language constructs
+which correspond to {\em categories} of {\sf Axiom} obeying the
+restriction of having no parameters. In Sec.~\ref{chparamtycl} we
+will discuss the case of categories with parameters.
+
+The momentarily occurring examples of such categories are given as the
+``basic algebra hierarchy'' on the inner cover page of the book on
+{\sf Axiom} \cite{Jenk92}. They consist of 46 categories. The
+maximal length of a chain in the induced partial order is 15.
+
+These categories correspond to type classes of {\sf Haskell}, cf.\
+Fig.~\ref{figaxord} and Fig.~\ref{fighasord}. We will often use the
+term {\em type class} --- which seems to be preferable --- instead of
+non-parameterized category.
+
+In \cite[Appendix~A]{Webe92b} the author has shown that almost all of
+the examples of types occurring in the specifications of the {\sf
+SAC-2} library (see e.\,g.\ \cite{Coll90}, \cite{Buch93}) can be
+structured by using the language construct of type classes.
+
+We will also assume that all domain constructors have only domains as
+parameters, and not elements of other domains. We will discuss the
+extension of having elements of domains as parameters in
+Sec.~\ref{chtydeel}.
+
+\begin{figure}[h]
+\rule{\textwidth}{0.1pt}
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+class (Eq a) => Ord a where
+ (<), (<=), (>=), (>):: a -> a -> Bool
+ max, min :: a -> a -> a
+
+ x < y = x <= y && x /= y
+ x >= y = y <= x
+ x > y = y < x
+
+ -- The following default methods are appropriate for partial orders.
+ -- Note that the second guards in each function can be replaced
+ -- by "otherwise" and the error cases, eliminated for total orders.
+ max x y | x >= y = x
+ | y >= x = y
+ |otherwise = error "max{PreludeCore}: no ordering relation"
+ min x y | x <= y = x
+ | y <= x = y
+ |otherwise = error "min{PreludeCore}: no ordering relation"
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+\rule{\textwidth}{0.1pt}
+
+\caption{Definition of partially ordered sets in the
+{\sf Haskell} standard prelude}
+\label{fighasord}
+\end{figure}
+
+\begin{figure}[h]
+\rule{\textwidth}{0.1pt}
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+++ Totally ordered sets
+++ Axioms
+++ . total ordering
+OrderedSet(): Category == SetCategory with
+ --operations
+ "<": ($,$) -> Boolean ++ The (strict) comparison operator
+ max: ($,$) -> $ ++ The maximum of two objects
+ min: ($,$) -> $ ++ The minimum of two objects
+ add
+ --declarations
+ x,y: $
+ --definitions
+ -- These really ought to become some sort of macro
+ max(x,y) ==
+ x > y => x
+ y
+ min(x,y) ==
+ x > y => y
+ x
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+\rule{\textwidth}{0.1pt}
+
+\caption{Definition of totally ordered sets
+in {\sf Axiom}}
+\label{figaxord}
+\end{figure}
+
+\subsection{Types as Terms of an Order-Sorted Signature}
+
+The idea of describing the types of a computer algebra system as terms
+of an order-sorted signature can also be found in the work of Rector
+\cite{Rect89} and Comon,\ et~al.\ \cite{Como91}. The idea of
+describing the type system of {\sf Haskell} using order-sorted terms
+is due to Nipkow and Snelting \cite{Nipk91}.
+
+However, the combination of ideas found in these papers is new and
+gives a solution to an important class of type inference problems
+occurring in computer algebra.
+
+In the following a {\em type} will just be an element of the set of
+all order-sorted terms over a signature $(S, \leq,\Sigma)$ freely
+generated by some family of infinite sets
+$V=\{ V_\sigma \mid \sigma \in S\}$.
+
+The sorts correspond to the non-parameterized categories, the basic
+algebra hierarchy. The order on the sorts reflects the inheritance
+mechanism of categories.
+
+The sets $V_\sigma$ are the sets of {\em type variables}.
+
+A type denoted by a ground term is called a {\em ground type}, a
+non-ground type is called a {\em polymorphic type}. Polymorphic types
+correspond to the {\em modemaps} of {\sf Axiom}.
+
+A type denoted by a constant symbol will be called a {\em base
+type}. So base types correspond to domains built by domain
+constructors without parameters. (Typical examples are \tf{integer},
+\tf{boolean}, \ldots)
+
+The non-constant operator symbols are called {\em type constructors}.
+The domain constructors of {\sf Axiom} which have only domains as
+parameters can be described by type constructors.
+
+We will use
+$$\begin{array}{l}
+\tf{list}: (\cf{any}) \cf{any}\\
+\tf{list}: (\cf{ordered\_set}) \cf{ordered\_set}\\
+\tf{UP}: (\cf{commutative\_ring} \; \cf{symbol}) \cf{commutative\_ring} \\
+\tf{UP}: (\cf{integral\_domain} \; \cf{symbol}) \cf{integral\_domain}\\
+\tf{FF}: (\cf{integral\_domain}) \cf{field}
+\end{array}$$
+as typical examples, where $\tf{UP}$ builds univariate polynomials in
+a specified indeterminate of a commutative ring, and $\tf{FF}$ the
+field of fractions of an integral domain.
+
+Notice the use of multiple declarations, which can be achieved in
+{\sf Axiom} using the conditional phrase {\tt has}.
+
+In the following we will sometimes assume that we have a {\em
+semantics} for the {\em ground types} which satisfies the following
+conditions:
+\begin{itemize}
+\item The ground types correspond to mathematical objects
+in the sense of universal algebra or model theory
+(A comprehensive reference for universal algebra is \cite{Grae79},
+for model theory \cite{Chan90}).
+\item Functions between ground types are set theoretical
+functions. If we say that two functions $f,g: t_1 \longrightarrow t_2$
+are equal ($f=g$) we mean equality between them as set theoretic
+objects.
+\end{itemize}
+
+Since we only need a set theoretic semantics for {\em ground types}
+and functions between ground types, the obvious interpretations of the
+types as set theoretic objects will do.\footnote{All objects
+corresponding to ground types one is interested in computer algebra
+can be given such a set theoretic interpretation. In other areas,
+e.\,g.\ in the context of the lambda calculus \cite{Bare84} this
+is not always the case. Nevertheless, this is not a real problem for
+our work, since our approach is primarily concerned with the situation
+arising in computer algebra.}
+
+Of course, equality between two functions will be in general an
+undecidable property, but this will not be of importance in the
+following discussion, since we will always give some particular
+reasoning for the equality of two functions between two types.
+
+We will also deal with polymorphic types in the following. However,
+it will not be necessary to have a formal semantics for the
+polymorphic types in the cases we will use them. Giving a semantics
+to polymorphic types can be quite difficult. So the one given in
+\cite{Como91} applies to fewer cases than the ones we are
+interested in. In general, it is possible that no ``set-theoretic
+semantics'' can be given to polymorphic types, as was shown by
+Reynolds \cite{Reyn84} for the objects of the second-order
+polymorphic lambda-calculus.
+
+\subsubsection{Properties of the Order-Sorted Signature of Types}
+\label{secproossty}
+
+The possibility to have multiple declarations of type constructors is
+used in {\sf Axiom} frequently. Syntactically it is achieved by a
+conditional phrase involving {\tt has}.\footnote{In {\sf Axiom}
+conditional phrases are used also for other purposes. So it might be
+useful to use different syntactic concepts instead of one.}
+
+Also constant symbols, i.\,e.\ base types, have usually multiple
+declarations, e.\,g.\ it is useful to declare $\tf{integer}$ to be an
+$\cf{integral\_domain}$ and an $\cf{ordered\_set}$. So the
+monotonicity condition cannot be assumed in general. However, for the
+purposes of type inference (see below) this condition is not needed.
+
+As is shown in \cite[Sec.~5]{Nipk91} it can be assumed that
+the signature is regular\footnote{At least if the signature is
+finite.} and downward complete if one allows to form the
+``conjunction'' $\sigma_1 \wedge \sigma_2$ of sorts $\sigma_1$ and
+$\sigma_2$. This conjunction has to fulfill the following conditions:
+
+\begin{enumerate}
+\item $\sigma_1 \wedge \sigma_2$ has to be the meet of $\sigma_1$ and
+$\sigma_2$ in the free lower semi-lattice on the partially ordered set
+$\langle S, \leq \rangle$ (cf.\ Def. 5).
+\item If a type constructor $\chi$ has declarations $\chi: (\gamma_1
+\cdots \gamma_n) \gamma$ and $\chi: (\delta_1 \cdots \delta_n) \delta$
+then it also has a declaration $$\chi: (\gamma_1 \wedge \delta_1 \:
+\cdots \gamma_n \wedge \delta_n) \gamma \wedge \delta.$$
+\end{enumerate}
+
+Using {\tt Join} there is a possibility to form such conjunctions of
+sort having the required properties in {\sf Axiom}.
+
+\begin{remark} Maybe the choice of the name {\tt Join} in {\sf Axiom}
+is somewhat misleading. Although the {\tt Join} of two categories
+gives a category having the union of their operations, this category
+is nevertheless corresponding to the {\em meet} of the corresponding
+sorts in the lower semi-lattice of sorts of the order-sorted signature
+of types. We cannot simply reverse the order on the sorts. If a type
+belongs to the join of two categories ${\cal A}$ and ${\cal B}$ we can
+conclude that it belongs to ${\cal A}$ (or ${\cal B}$) but not vice versa!
+\end{remark}
+
+For the purpose of type inference it would be nice if the signature is
+unitary unifying. This is the case for regular and downward complete
+signatures if they are also {\em coregular}. However, we do not know
+whether a restriction implying coregularity is reasonable in the
+context of a computer algebra system.
+
+Nipkow and Snelting \cite{Nipk91} have argued that {\sf Haskell}
+enforces that the order-sorted signatures are injective and subsort
+reflecting which also imply that the signature is unitary unifying.
+
+%These assumptions seem to be problematic in the context
+%of computer algebra as the following example shows.
+%Consider the
+%type constructor
+%$\tf{FF}$ building the field of fractions of an integral domain.
+%Then the following declarations --- which reflect certain
+%mathematical facts ---
+%would contradict the assumption that the
+%signature is {\em injective}:
+
+An example of a declaration which would prohibit that the signature is
+{\em injective} is the following. Consider the type constructor
+$\tf{FF}$ building the field of fractions of an integral domain. Then
+the declarations
+$$\begin{array}{l}
+\tf{FF}: (\cf{integral\_domain}) \cf{field}\\
+\tf{FF}: (\cf{field}) \cf{field}
+\end{array}$$
+correctly reflect certain mathematical facts. Although it does not
+seem to be necessary in this example to have the second declaration we
+do not know whether there is an ``algebraic'' reason which implies
+that declarations violating injectivity are not necessary. So this
+point might deserve further investigations.
+
+\subsubsection{Definition of Overloaded Functions}
+
+The formalism developed above is well suited to express the
+overloading which can be performed by category definitions.
+
+A declaration such as
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+AbelianSemiGroup(): Category == SetCategory with
+ --operations
+ "+": ($,$) -> $ ++ x+y computes the sum of x and y
+ "*": (PositiveInteger,$) -> $
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+would translate into
+$$\begin{array}{l}
+\tf{+}: \forall t_{\cf{AbelianSemiGroup}} \, .\,
+ t_{\cf{AbelianSemiGroup}} \times t_{\cf{AbelianSemiGroup}}
+\longrightarrow t_{\cf{AbelianSemiGroup}},\\
+\tf{*}: \forall t_{\cf{AbelianSemiGroup}} \, .\,
+ \tf{PositiveInteger} \times t_{\cf{AbelianSemiGroup}}
+\longrightarrow t_{\cf{AbelianSemiGroup}},
+\end{array}
+$$
+where $t_{\cf{AbelianSemiGroup}}$ is a type variable of sort
+$\cf{AbelianSemiGroup}$. It is bounded by the universal quantifier
+which has to be read that $t_{\cf{AbelianSemiGroup}}$ may be
+instantiated by an arbitrary type of sort $\cf{AbelianSemiGroup}$.
+This is just what we want. So the definition of categories resp.\
+type classes can be seen as a syntactic mechanism to give such
+declarations of overloaded operators. The mechanism to declare that a
+category extends others can be simply modeled by the order relation on
+the sorts in the order-sorted algebra of types --- if there are no
+parameters in category definitions.\footnote{The inheritance mechanism
+is certainly convenient for such a large system as {\sf Axiom} --- as
+we have mentioned before, even the basic algebra hierarchy consists of
+46 categories with chains of maximal length of 15 ---, although it can
+be questioned whether it is really necessary, cf.\
+\cite{Chen92}.}
+
+An advantage of the syntactic form of type classes declarations is
+certainly that the general declaration of the overloaded operators and
+possible {\em default declarations} are collected in one piece of
+code. This collection improves readability and makes clear which
+operators can have defaults and which cannot.
+
+The value of default declarations should not be underestimated. They are
+a good way to support rapid prototyping and will become more important
+the bigger a system grows. They support the possibility to obtain
+algorithms over new structures quite easily. Since it is always
+possible to ``overwrite'' a default operation by a more special and
+efficient one their existence does not contradict the goal of having
+algorithms which are as efficient as possible.
+
+\newsavebox{\fodauxa}
+\newsavebox{\fodauxb}
+\newsavebox{\fodauxc}
+\newsavebox{\fodauxd}
+\newsavebox{\fodauxe}
+\newsavebox{\fodauxf}
+\newsavebox{\fodauxg}
+\newsavebox{\fodauxh}
+\sbox{\fodauxa}{
+\begin{minipage}[l]{0.2\textwidth}
+\hfil \large \sf plus \\
+\it integer \\
+polynomial \\
+matrix
+\end{minipage}
+}
+\sbox{\fodauxb}{
+\begin{minipage}[l]{0.2\textwidth}
+\hfil \large \sf minus \\
+\it integer \\
+polynomial \\
+matrix
+\end{minipage}
+}
+\sbox{\fodauxc}{
+\begin{minipage}[l]{0.2\textwidth}
+\hfil \large \sf times \\
+\it integer \\
+polynomial \\
+matrix
+\end{minipage}
+}
+\sbox{\fodauxd}{
+\begin{minipage}[l]{0.2\textwidth}
+\hfil \large \sf divide \\
+\it integer \\
+polynomial \\
+matrix
+\end{minipage}
+}
+\sbox{\fodauxe}{
+\begin{minipage}[l]{0.2\textwidth}
+\hfil \large \sf integer \\
+\it plus \\
+minus \\
+times \\
+divide
+\end{minipage}
+}
+\sbox{\fodauxf}{
+\begin{minipage}[l]{0.2\textwidth}
+\hfil \large \sf polynomial \\
+\it plus \\
+minus \\
+times \\
+divide
+\end{minipage}
+}
+\sbox{\fodauxg}{
+\begin{minipage}[l]{0.2\textwidth}
+\hfil \large \sf matrix \\
+\it plus \\
+minus \\
+times \\
+divide
+\end{minipage}
+}
+
+\sbox{\fodauxh}{
+\begin{minipage}[l]{0.99\textwidth}
+\begin{center}
+\fbox{\usebox{\fodauxa}}\quad\quad\fbox{\usebox{\fodauxb}}\\
+\vspace{1cm}
+\fbox{\usebox{\fodauxc}}\quad\quad\fbox{\usebox{\fodauxd}}\\
+\vspace{1cm}
+{\large\it Operation-Centered}\\
+\vspace{2cm}
+\fbox{\usebox{\fodauxe}}\quad \fbox{\usebox{\fodauxf}}\quad
+ \fbox{\usebox{\fodauxg}}\\
+\vspace{1cm}
+{\large\it Type-Centered}
+\end{center}
+\end{minipage}
+}
+
+\begin{figure}
+\framebox[\textwidth][l]{
+\hfil\usebox{\fodauxh}
+}
+
+\caption{Some terminology from Foderaro's thesis}
+\label{figfodterm}
+\end{figure}
+
+\label{oopfod}
+
+In his thesis \cite{Fode83} Foderaro distinguishes between an
+``operation centered'' method and a ``type centered'' or
+``object-oriented'' view of organizing data (cf.\
+Fig.~\ref{figfodterm}) and argues why the type-centered approach has
+to be preferred.
+
+However, in our formalism these two views are essentially equivalent.
+There is a translation of a declaration of a type class --- say
+$\cf{Ring}$ --- and an instantiation of it --- say with $\tf{integer}$
+--- with operations $\tf{integer\_plus}$ and $\tf{integer\_times}$
+into declarations
+$$\begin{array}{l}
+\tf{+}: \forall t_{\cf{Ring}} \, .\,
+ t_{\cf{Ring}} \times t_{\cf{Ring}}
+\longrightarrow t_{\cf{Ring}},\\
+\tf{*}: \forall t_{\cf{Ring}} \, .\,
+t_{\cf{Ring}} \times t_{\cf{Ring}}
+\longrightarrow t_{\cf{Ring}},
+\end{array}
+$$
+whereby it can be deduced by a type inference algorithm that
+$\tf{integer\_plus}$ has to be used for $\tf{+}$ if $t_{\cf{Ring}}$ is
+instantiated with the type constant $\tf{integer}$. We will present
+this inference algorithm in the next section.
+
+\subsection{Type Inference}
+\label{sectytycl}
+
+In the following we will show that the type inference problem is
+decidable. We will sketch the proof which is due to Nipkow and
+Snelting \cite{Nipk91} because of its importance also for
+computer algebra.
+
+In Sec.~\ref{secaxhasex} we will give some examples in which the {\sf
+Axiom} type inference mechanism fails whereas in {\sf Haskell} a type
+can be deduced.
+
+\subsubsection{Type Inference Rules of Mini-Haskell}
+
+In Fig.~\ref{figtyns} the type inference rules for the language
+Mini-Haskell of Nipkow and Snelting \cite{Nipk91} are given. This
+language includes the central typing concepts of {\sf Haskell} but is
+well suited for theoretical investigations since it is very small.
+Many useful properties of an actual programming language can be seen
+as ``syntactic sugar'' for the purpose of the type inference problem.
+
+Mini-Haskell can only handle unary functions. However, in this
+assumption there is no loss in generality. Since Mini-Haskell has
+higher order-functions, a function of type
+$$\tau_1 \times \tau_2 \longrightarrow \tau_3$$
+can be expressed by a function having type
+$$\tau_1 \longrightarrow (\tau_2 \longrightarrow \tau_3),$$
+a technique usually called {\em currying}.\footnote{After Haskell
+B. Curry who has used this technique in his work on Combinatory
+logic. Historically, already Sch\"onfinkel has used it in
+\cite{Scho24}.}
+
+The language does not have explicit recursion or pattern matching.
+Although these are important properties of a programming language,
+there is no loss in generality in the type inference problem if we
+exclude them from the language. There are well known translations of
+pattern matching into expressions of the lambda-calculus, see e.\,g.\
+\cite{Jone87}. In principle, recursion can be expressed using
+fixpoint combinators which only requires to have certain appropriately
+typed functional constants (see e.\,g.\ \cite{Leis87}).
+
+\begin{remark}
+Having explicit recursion and some special typing rules for recursion
+gives the possibility to assign typing to some recursive programs
+which would be ill-typed otherwise (see e.\,g.\ \cite{Kfou88},
+\cite{Tiur90}). However, in some of these systems type inference
+becomes undecidable \cite{Tiur90}, \cite{Kfou93}.
+\end{remark}
+
+\begin{remark}
+The so called ``anonymous functions'' in {\sf Axiom}
+ \cite[Sec.~6.17]{Jenk92} can simply be seen as
+$\lambda$-abstracted expressions.
+Since recursion can be expressed by the use of fixpoint combinators,
+also $\lambda$-expressions without names can be
+recursive,\footnote{Their ``names'' are bound variables!} in contrast
+to the remark in \cite[p.~168]{Jenk92}: ``An anonymous function cannot
+be recursive: since it does not have a name, you cannot even call it
+within itself!''
+\end{remark}
+
+In the following we will use the notation of Nipkow and Snelting
+\cite{Nipk91} which has some syntactic differences to our standard
+notation but should be clear from the context. Since the type of
+functions between $\tau$ and $\tau'$ has a special role in the
+following there is a special notation for it and it is written as
+$\tau \longrightarrow \tau'$. The meta-variable $\chi$ ranges over
+type constructors, where it is assumed that a finite set of them is
+given (e.\,g.\ having $\tf{int}$, $\tf{float}$, $\tf{list}(\alpha)$,
+$\tf{pair}(\alpha,\beta)$ as members, as in \cite{Nipk91}).
+
+Formally, a typing hypothesis $A$ is a mapping from a finite set of
+variables to types. We will write
+$$A+[x\mapsto \tau]$$
+for the mapping which assigns $\tau$ to $x$ and is equal to $A$ on
+${\rm dom}(A) - \{x\}$.\footnote{If $x \in {\rm dom}(A)$ its value
+will be ``overwritten''.} For signatures, the notation
+$$\Sigma + \chi: (\overline{\gamma_n})\gamma$$
+just means that a declaration $\chi: (\overline{\gamma_n})\gamma$
+is added to $\Sigma$.
+
+\begin{figure}[t]
+\newsavebox{\fignsaux}
+\sbox{\fignsaux}{
+\begin{minipage}[l]{0.995\textwidth}
+$$\begin{array}{ll}
+{\rm TAUT} & \frac{\displaystyle A(x) \succeq_\Sigma \tau}{\displaystyle (A, \Sigma) \vdash
+ x: \tau} \\
+\\
+{\rm APP} & \frac{\displaystyle (A, \Sigma) \vdash e_0 :
+\tau \longrightarrow \tau' \quad (A, \Sigma) \vdash e_1 : \tau }{\displaystyle (A, \Sigma) \vdash
+e_0 \: e_1 : \tau' }\\
+\\
+{\rm ABS} & \frac{\displaystyle (A+[x\mapsto \tau], \Sigma)
+\vdash e : \tau'}{\displaystyle
+ (A, \Sigma) \vdash \lambda x.e: \tau \longrightarrow \tau'}\\
+\\
+{\rm LET} & \frac{\displaystyle (A, \Sigma) \vdash e_0 : \tau \:
+FV(\tau,A)=\{ \alpha_{\gamma_1}, \ldots , \alpha_{\gamma_k} \} \:
+(A+[x\mapsto \forall \overline{\alpha_{\gamma_k}}.\tau], \Sigma)
+\vdash e_1 : \tau'}{\displaystyle (A, \Sigma)\vdash {\bf let} \: x= e_0 \: {\bf in} \:
+e_1 : \tau'} \\
+\\
+{\rm CLASS} &\begin{array}{l}
+(A, \Sigma) \vdash {\bf class} \: \gamma \leq \gamma_1, \ldots, \gamma_n \:
+{\bf where} \: x_1: \forall \alpha_\gamma .\tau_1, \ldots, x_k: \forall
+\alpha_\gamma . \tau_k : \\
+\quad\quad (A + [x_i \mapsto \forall \alpha_\gamma .\tau_i \mid i=1..k],
+\Sigma + \{\gamma \leq \gamma_j \mid j= 1..n\} )
+\end{array} \\
+\\
+{\rm INST} & \frac{\displaystyle
+A(x_i) = \forall \alpha_\gamma . \tau_i \quad\quad
+(A, \Sigma) \vdash e_i: \tau_i[\chi(
+\overline{\alpha_{\gamma_n}})/\alpha_\gamma] \quad\quad i=1..k
+}{\displaystyle (A, \Sigma)\vdash
+ {\bf inst} \: \chi : (\overline{\gamma_n})\gamma
+\: {\bf where} \: x_1 = e_1, \ldots, x_k = e_k : (A, \Sigma +
+ \chi: (\overline{\gamma_n})\gamma)}\\
+\\
+{\rm PROG} & \frac{\displaystyle
+(A_{i-1}, \Sigma_{i-1}) \vdash d_i: (A_i, \Sigma_i) \quad i=1..n
+\quad\quad (A_n,\Sigma_n) \vdash e : \tau}{\displaystyle
+(A_0,\Sigma_0) \vdash d_1; \ldots ; d_n; e : \tau}
+\end{array}$$
+\end{minipage}
+}
+
+\framebox[\textwidth][l]{\usebox{\fignsaux}
+}
+
+\caption{The type inference rules for Mini-Haskell of
+Nipkow \& Snelting}
+\label{figtyns}
+\end{figure}
+
+In Fig.~\ref{figtyns} the following conventions are used.
+$\overline{\alpha_{\gamma_n}}$ denotes the list
+$\alpha_{\gamma_1}, \ldots, \alpha_{\gamma_n}$,
+with the understanding that the
+$\alpha_{\gamma_i}$ are distinct type variables. The first four rules
+in the type inference system in Fig.~\ref{figtyns} are almost
+identical to the rules of Damas and Milner for {\sf ML} typing
+\cite{Dama82}. There are two differences: all inferences
+depend on the signature $\Sigma$ of the type algebra as well as the
+set of type assumptions $A$. Furthermore, generic instantiation in
+rule {\rm TAUT} must respect $\Sigma$. This is written
+$\sigma \succeq_\Sigma \tau$
+meaning that $\sigma$ has the form
+$\forall \overline{\alpha_{\gamma_n}}. \tau_0$,
+there are $\tau_i$ of sort
+$\gamma_i$ and $\tau=\tau_0[\tau_1/\alpha_{\gamma_1}, \ldots
+,\tau_n/\alpha_{\gamma_n}]$. The notation $FV(\tau)$ denotes the set
+of free type variables in $\tau$; $FV(\tau,A)$ denotes $FV(\tau) - FV(A)$.
+
+If no class and instance declarations are present, every type
+constructor has the topmost sort as arity.
+
+For a detailed discussion of the rules we refer to \cite{Nipk91}.
+Notice that rule CLASS has no premises. The symbol ``$:$'' has two
+different meanings. On the one hand it assigns a type to an
+expression or a program. On the other hand it assigns a pair
+consisting of a typing hypothesis and a signature to a {\bf class}- or
+{\bf inst}-declaration.
+
+We have presented the simpler form of the type inference system as can
+be found in \cite{Nipk91}. A problem is that the obtained
+order-sorted signature $\Sigma$ need not be regular. However, if we
+allow the formation of the conjunction of two sorts --- which
+corresponds to the {\tt Join} of two categories in {\sf Axiom} ---
+then the signature can be made regular (and downward complete). So we
+can assume w.\,l.\,o.\,g. that the signature is regular, omitting for
+simplicity the slightly more complicated type inference rules for the
+system handling these conjunctions of sorts. For more details we
+refer to \cite{Nipk91}.
+
+The main result of \cite{Nipk91} can be stated in the following form.
+
+{\bf Theorem 4. (Nipkow and Snelting)}
+\label{decMH}
+The type inference problem for Mini-Haskell can be
+effectively reduced to the computation of order-sorted unifiers
+for a regular signature.
+It is thus decidable and there is a finite set
+of principal typings. If the signature is unitary unifying, then
+there is a unique principal type.
+
+%One of the main points is rule
+%(ABS) in which {\bf declaration} ....
+%only overloaded functions as instances of type classes
+
+\subsubsection{Types of Functions}
+\label{secaxhasex}
+
+In this section we want to show that the above results on the type
+system for {\sf Haskell} would allow an extension of the type system
+of {\sf Axiom}.
+
+\begin{figure}[t]
+\rule{\textwidth}{0.1pt}
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+->fac n == if n < 3 then n else n * fac(n-1)
+ Type: Void
+->fac 10
+ (2) 3628800
+ Type: PositiveInteger
+->g x == x + 1
+ Type: Void
+->g 9
+ Compiling function g with type PositiveInteger -> PositiveInteger
+ (7) 10
+
+ Type: PositiveInteger
+->g (2/3)
+ 5
+ (8) -
+ 3
+
+ Type: Fraction Integer
+
+->mersenne i== 2**i - 1
+ Type: Void
+->mersenne
+
+ i
+ (2) mersenne i == 2 - 1
+ Type: FunctionCalled mersenne
+->mersenne 3
+ Compiling function mersenne with type PositiveInteger -> Integer
+
+ (3) 7
+ Type: PositiveInteger
+->addx x == ((y :Integer): Integer +-> x + y)
+ Type: Void
+->g:=addx 10
+ Compiling function addx with type PositiveInteger -> (Integer ->
+ Integer)
+
+ (10) theMap(*1;anonymousFunction;0;G1048;internal,502)
+ Type: (Integer -> Integer)
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+\rule{\textwidth}{0.1pt}
+
+\caption{Typing of some user-defined functions in {\sf Axiom}}
+\label{figusdeffuax}
+\end{figure}
+
+\begin{figure}[t]
+\rule{\textwidth}{0.1pt}
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+fact 0 = 1
+fact (n+1) = (n+1)*fact n
+Phase TYPE:
+fact :: Integral m => m -> m
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+
+\bigskip
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+square x = x * x
+Phase TYPE:
+square :: Num t => t -> t
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+
+\bigskip
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+mersenne i = 2^ i - 1
+addx x = \y -> y+x
+z::Integer
+z=10
+g = addx z
+h = g 3
+
+Phase TYPE:
+mersenne :: (Num tv57, Integral tv58) => tv58 -> tv57
+addx :: Num tv59 => tv59 -> tv59 -> tv59
+z :: Integer
+g :: Integer -> Integer
+h :: Integer
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+\rule{\textwidth}{0.1pt}
+
+\caption{Corresponding typings in {\sf Haskell}}
+\label{figusdeffuha}
+\end{figure}
+
+In {\sf Axiom} it is possible to have functions as objects, see
+\cite[Sec.~6]{Jenk92} and Fig.~\ref{figusdeffuax}. Although {\sf
+Axiom} has the concept of functions as objects and it can usually
+infer the type of objects, it cannot infer the type of functions.
+
+Strictly speaking the inferred types {\tt Void} or
+{\tt FunctionCalled mersenne} in Fig.~\ref{figusdeffuax}
+are false, since they differ from the types when the functions are
+explicitly typed by the user.
+
+The problem seems to be that {\sf Axiom} can only infer ground types
+and not polymorphic types. For most purposes in computer algebra this
+might be sufficient. However, the type of functions has to be
+polymorphic in many cases.
+
+In Fig.~\ref{figusdeffuha} it is shown that {\sf Haskell} can infer a
+type for such functions. The {\sf Haskell} syntax has to be read as
+follows: {\tt Integral} is a type class to which {\tt Integer}
+belongs. The typing expression for {\tt fact} has to be read as the
+type of {\tt fact} is a function in one argument taking arguments of a
+type in type class {\tt Integral} and returning an argument of the
+same type; the type variable {\tt m} is bound in the expression and is
+chosen arbitrarily.
+
+By Theorem 4 we know that it is decidable whether there is a
+typing of an expression and that there are only finitely many
+principal typings in the positive case. As is discussed in
+\cite{Nipk91} the restrictions on typings in {\sf Haskell} even imply
+that there is always a single principal type. However, since we do not
+know to what extent these assumptions will be justified in the area of
+computer algebra, we will not claim the more special result stated in
+Theorem 4.
+
+For the purpose of this thesis we can stop at this point, since we are
+interested in questions of typability and not in ones of code
+generation. A certain problem in {\sf Haskell} is that of {\em
+ambiguity}. Although all valid typings of an expression are instances
+of a most general type (involving type variables) it may happen that
+there is not enough information to generate code in an unambiguous
+way. Some discussions and examples of ambiguity can be found e.\,g.\
+in \cite{Huda99}, \cite{Faxe02}, \cite{Nipk91}. However, since
+this problem arises ``below'' the typing level, some new concepts seem
+to be necessary in order to treat this problem formally, and the
+author of this thesis does not know of any such formal approaches.
+
+\subsubsection{A Possible Application of
+Combining Type Classes and Parametric Polymorphism}
+\label{posappcom}
+
+As we have seen, we can extend a type system supporting type classes
+with parametric polymorphism and functions as first-class citizens and
+the type inference problem still remains decidable.
+
+Such an extension of an {\sf Axiom} like type system seems to be
+interesting in the area of computer algebra for several reasons.
+First of all lists play an important role in computer algebra and many
+typing issues related to lists are connected with parametric polymorphism.
+
+But it seems to be possible to have many further applications.
+As is shown by Rydeheard and Burstall in \cite{Ryde88} it is possible
+to encode many concepts of category theory as types in {\sf ML} and to
+state several constructive properties of category theory as {\sf ML}
+programs. This encoding uses heavily the concepts of parametric
+polymorphism and higher-order functions. This formalism seems to be
+very useful, although there is no perfect correspondence between the
+objects of category theory and the types in {\sf ML}.\footnote{For
+instance, the well-formedness of composites in a category is not a
+matter of type-checking, cf.\ \cite[p.~58]{Ryde88}. Other examples
+can be found in \cite[Sec.~10]{Ryde88}.}
+
+Now there are many well-known interactions between category theoretic
+concepts and algebraic concepts, see e.\,g.\
+\cite[Sec.~II.7]{Macl92} or \cite{Mane76} for interactions
+of equational reasoning and category theory. Since many concepts in
+category theory are constructive, it seems to be possible to use some
+of these connections in a computer algebra system.
+
+\subsubsection{Typing of ``Declared Only'' Objects}
+
+Consider the Axiom dialogue:
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+->a:Integer
+ Type: Void
+
+->a+a a is declared as being in Integer but has not been given a value.
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+
+Although a corresponding construct leads to a program error in {\sf
+Haskell}, it could be typed by the {\sf Haskell} type inference
+algorithm, if a declaration such as \verb!a: Integer! would just add
+the corresponding typing assumption to the set of typing hypotheses.
+
+Thus if we add a type declaration statement to the syntax of
+Mini-Haskell\footnote{We will use {\bf has\_type} as an infix
+operation in the object language for the typing declaration instead of
+``:'' in order to distinguish between the object and the meta level in
+rule {\rm (TYPE-AS)}.}
+$$ x\: {\bf has\_type }\: \tau,$$
+then we simply need to add the following trivial rule to the ones given in
+Fig.~\ref{figtyns}:
+$$\mbox{(TYPE-AS)} \quad\quad {\displaystyle (A,
+\Sigma)\vdash x \:{\bf has\_type}\: \tau : (A+[x \mapsto \tau],
+\Sigma)} $$
+
+\subsection{Complexity of Type Inference}
+
+\subsubsection{The ML-fragment}
+
+The type inference problem for the simply typed lambda calculus,
+i.\,e.\ the {\sf ML} core language without usage of {\tt let}
+constructions reduces in linear time to a (syntactic) unification
+problem. Using a representation of terms as directed acyclic graphs
+(dags) the unification problem is decidable in linear time
+\cite{Pate78}, and so is the type inference problem.
+
+In \cite[p.~450]{Kane90} this result is stated in the
+following precise form:
+\begin{quote}
+Given a {\tt let}-free expression $M$ of length $n$ (with all bound
+variables distinct), there is a linear time algorithm which computes a
+dag representation of the principal typing of $M$, if it exists, and
+returns {\em untypeable} otherwise. If it exists, the principal
+typing of $M$ has length at most $2^{O(n)}$ and dag size $O(n)$.
+\end{quote}
+
+Even if {\tt let}-expressions are used, the type inference problem
+remains decidable and can be solved using the Damas-Milner algorithm
+\cite{Dama82}. Unfortunately, the complexity becomes dramatically
+worse. In the worst case, doubly-exponential time is required to
+produce a string output of a typing. Using a dag representation the
+algorithm can be modified to run in exponential time, which is also
+the proven lower (time complexity) bound of the problem (see e.\,g.\
+\cite{Kane90}).
+
+Nevertheless, {\sf ML} typing appears to be efficient in practice,
+although {\tt let} expressions are frequently used in actual {\sf ML}
+programs.\footnote{We refer to \cite{Kane90} for further
+discussions of this point.}
+
+\subsubsection{Complexity of Type Inference for the System of Nipkow
+and Snelting}
+
+If no {\tt let} expressions are used, then the type inference problem
+for the system of Nipkow and Snelting can be reduced to a unification
+problem for order-sorted terms.
+
+This reduction is linear, so the inherent complexity of the problem is
+the same as the one of the corresponding unification problem.
+
+However, the resulting signature need not be regular. By introducing
+``conjunctive sorts'' Nipkow and Snelting show how the signature can
+be made regular. This process consists of building new sorts for any
+finite subset of the set of sorts introduced by the {\tt class} and
+{\tt inst} declarations. This construction is thus exponential in the
+number of {\tt class} and {\tt inst} declarations of the program.
+
+The unification problem for regular order-sorted signatures is
+decidable. However, in finite and regular signatures, deciding
+whether an equation is unifiable is an NP-complete problem (see
+\cite[Corollary~10]{Smol89}).
+
+The situation is much better, if the signature is also coregular and
+downward complete, since in this case unification has quasi-linear
+complexity \cite[Theorem~18]{Smol89}.
+
+Since for many programs of the system the {\tt class} and {\tt inst}
+declarations are the same, the type inference problem is of feasible
+complexity if the obtained signature is coregular\footnote{By
+construction, it is regular and downward complete.} and we view this
+signature as pre-computed.
+
+Of course, if {\tt let} statements are used, a lower bound for
+the complexity is exponential. The complexity of various type
+systems for {\sf Haskell}-like overloading has been investigated in
+\cite{Volp91}.
+
+\subsection{Algebraic Specifications of Type Classes}
+
+Many important classes of objects occurring in computer algebra can be
+defined by a finite set of equations, e.\,g.\ monoids, groups, Abelian
+groups, or rings.
+
+So the corresponding type class can be specified by an algebraic
+specification (see e.\,g.\ \cite{Ehri85}, \cite{Wirs91}) if we use the
+class of all models of the specification as the semantics of the
+specification, which is usually called the {\em loose semantics}.
+
+\begin{remark}
+Usually, an algebraic specification is thought to specify abstract
+data types in the sense of {\sf Axiom} or {\sf Haskell}. So very often
+the {\em initial semantics} is used, i.\,e.\ the specified object is
+the initial object in the category\footnote{Category in the category
+theoretic sense!} of structures being models of the specification. A
+major advantage of this view is that many structures one is interested
+in --- e.\,g.\ the rational numbers, stacks, queues, \ldots --- can be
+specified by (sorted or order-sorted) equations. A characterization
+of structures which can be specified by the initial semantics can be
+found in \cite{Hodg95}.
+\end{remark}
+
+So much of the work on algebraic specifications using the loose
+semantics is relevant for the specification of type classes. Many
+references to such work are given in the survey of
+Wirsing~\cite{Wirs91}.
+
+\subsubsection{Some Hard-to-Specify Structures}
+
+Unfortunately, some very basic structures, namely integral domains
+(and fields) cannot be specified by equations, even if we allow
+equational implications. This is a consequence of the following
+simple fact.
+
+{\bf Lemma 4.}
+{\sl The class of integral domains is not closed under the formation of
+products.}
+
+\begin{proof}
+Let $A$, $B$ be two arbitrary integral domains (of
+cardinality $\geq 2$). Let $0 \neq a \in A$ and $0 \neq b \in B$.
+Then $(a,0) \cdot (0,b) = (0,0)=0_{A \times B}$, i.\,e.\ the product
+$A \times B$ has zero divisors. \qed
+\end{proof}
+
+The following well known theorem shows the problem.
+
+{\bf Theorem 5.}
+{\sl A class $V$ of algebras\footnote{Algebra in the sense
+of universal algebra.} is definable by equational implications iff $V$
+is closed under the formation of isomorphic images, products,
+subalgebras, and direct limits.}
+
+\begin{proof}
+See \cite[p.~379]{Grae79}. \qed
+\end{proof}
+
+Combining these results we obtain our claim.
+
+{\bf Corollary 5A}
+{\sl The class of integral domains is not definable by equational
+implications.}
+
+Since the technique of conditional term rewriting systems handles
+reasoning for equational implications (cf.\ \cite[Sec.~11]{Klop90},
+\cite{Ders89}) even this powerful technique is too weak to be used as a
+mechanical tool for the specification of these examples.\footnote{At
+least, if we do not allow some coding of information.}
+
+Clearly, integral domains or fields can be defined by a finite set of
+first-order formulas. Unfortunately, it is not possible to define
+them by Horn clauses, which would be one of the next classes of more
+powerful specification formalisms which are well known (cf.\
+\cite{Wirs91}) and have a much better computational behavior than
+arbitrary first-order formulas.\footnote{The success of {\sf PROLOG}
+as a programming language is partly due to this fact.}
+
+{\bf Proposition 1.}
+{\sl Let ${\cal M}$ be a model-class of a first-order theory. If ${\cal
+M}$ is not closed under products, then the first-order theory of
+${\cal M}$ cannot be axiomatized by a set of Horn sentences.}
+
+\begin{proof}
+The claim follows immediately from the fact that Horn
+sentences are preserved under direct products (see e.\,g.\
+\cite[Prop.~6.2.2]{Chan90}). \qed
+\end{proof}
+
+Though most of the examples given as the ``Basic Algebra Hierarchy''
+in \cite{Jenk92} can be seen as model classes of finite sets of
+first-order sentences, there are some which are not model classes of any
+set of first-order sentences --- even if we allow infinite sets. An
+example is the category $\cf{Finite}$.
+
+{\bf Lemma 5.}
+{\sl There is no set of first-order sentences whose model
+class is the class of all finite sets.}
+
+\begin{proof}
+If a set of first-order sentences has finite models of
+arbitrary large finite cardinality, then it also has an infinite
+model. \qed
+\end{proof}
+
+\begin{remark}
+In \cite{Dave90} it is shown that there are several quite simple
+operations in basic classes (such as integral domains) which cannot be
+defined constructively although they can be easily specified. So the
+meaning of a certain type class given there is that of a collection of
+all domains in which all the specified operations can be interpreted
+constructively. In \cite{Dave91} the technique of introducing classes
+in which an operation can be defined constructively is applied to the
+problem of factorization of polynomials.
+\end{remark}
+
+\subsubsection{Algebraic Theories}
+
+So it seems to be a wise decision in the design of {\sf Axiom} to
+distinguish between ``axioms'' which are only stated in comments and
+give the intended meaning of an {\sf Axiom} category as a class of
+algebraic structures and ``attributes'' that can be ``explicitly
+expressed'' \cite[p.~522]{Jenk92}.
+
+The parts which can be explicitly expressed by the {\sf Axiom} system
+consist of equational properties only and are even a small subset of
+them. Applying the rich machinery of algebraic specifications
+techniques seems to be a possibility to extend the properties that are
+``explicitly expressed'' considerably.
+
+Moreover, there are many well known specifications of structures which
+are present as domains in {\sf Axiom}. It seems to be an interesting
+field of further research to clarify the interaction between
+algebraically specified categories and algebraically specified
+domains.
+
+The following extension of the work of Rector \cite{Rect89} is a
+first approach in this direction: Assume that only finitely many sorts
+and operation symbols are used for the specification ${\cal D}$ of a
+certain domain and of the specification ${\cal C}$ of a certain type
+class. We can use different semantics as the initial semantics for
+the specification of the domain and the loose semantics for the
+specification of the type class. Then it can be deduced automatically
+whether the domain is a member of the type class in the following way:
+Generate the finitely many mappings which are potentially a view of
+${\cal D}$ as ${\cal C}$ and check algorithmically whether this
+mapping is a view.\footnote{We refer to \cite[p.~303]{Rect89} for
+the precise definitions of the used terms.} The possibility of giving
+certain specifications an initial semantics and of giving others a
+loose semantics is also built in {\sf OBJ} (cf.\ \cite{Wirs91},
+\cite{Gogu92}). The former are called {\em objects}, the
+latter {\em theories} and there is the possibility to define certain
+mappings as views quite in the sense of above. However, the definition
+of views has ``documentation aspect''. A verification that a given
+mapping is a view is not implemented (cf.\ \cite[Sec.~4.3]{Gogu92}).
+
+As we have seen it is not possible to specify all structures used in a
+computer algebra system by equations. There are several possibilities
+to overcome this problem:
+\begin{enumerate}
+\item Use more powerful specification techniques.
+\item Do not specify all structures
+{\em ab initio}, but take some of the structures as given.
+\end{enumerate}
+
+The first possibility is used in \cite{Limo92}. There the
+framework of first-order logic was chosen for the specification of
+structures arising in computer algebra. However, as we have shortly
+discussed, even this framework cannot handle all interesting cases.
+
+Moreover, for an efficient system it is necessary that certain parts
+of a system are implemented by algorithms which are not the
+result of a formal specification. So the combination of taking
+certain parts as given and using equational reasoning for the formal
+part whose computational behavior is much better than the one of more
+powerful techniques seems to be a promising compromise between two
+contradicting requirements.
+
+Another advantage of this approach is that already much is known about
+mathematical structures which can be specified in this way as e.\,g.\
+the book by Manes on ``Algebraic Theories'' \cite{Mane76} shows:
+\begin{quote} The program of this book is to define for a ``base
+category'' ${\cal K}$ --- a system of mathematical discourse
+consisting of objects whose structure we ``take for granted'' ---
+categories of ${\cal K}$-objects with ``additional structure,'' to
+prove general theorems about such algebraic\footnote{Here
+``algebraic'' means equationally definable.} situations, and to
+present examples and applications of the resulting theory in diverse
+areas of mathematics. \end{quote}
+
+\subsubsection{Type Classes with Higher-Order Functions}
+
+Type inference remains decidable for a system with type classes even
+if higher-order functions are allowed in the way they are in {\sf
+Haskell}. As we have shown in Sec.~\ref{posappcom} such a combination
+is interesting for computer algebra systems.
+
+In order to specify such a system algebraically it is necessary to
+extend the concepts of first-order algebraic specifications techniques
+with higher-order constructs. Some investigations of such
+combinations are done in \cite{Brea89a} and in \cite{Joua91}. The
+results given there show that such a combination has feasible
+properties, e.\,g.\ confluence and termination properties of the
+first-order part are preserved when some reasonable conditions are
+fulfilled.
+
+\subsection{Parameterized Type Classes}
+\label{chparamtycl}
+
+In {\sf Axiom} categories can be parameterized. The occurring
+examples can be distinguished in several ways. On the one hand there
+is the distinction between domains and elements as parameters. On the
+other hand there are several other distinctions based on more
+``semantical'' considerations.
+
+Some parameterized type classes simply arise because the classes of
+algebraic objects should be described as being parameterized, e.\,g.\
+vector spaces over a field $K$, or more generally, left- or
+right-modules over a ring $R$.
+
+An example of a category having an element as a parameter is
+%\begin{progverb}
+\begin{footnotesize}
+\begin{verbatim}
+PAdicIntegerCategory(p): Category == Definition where
+ ++ This is the category of stream-based representations of
+ ++ the p-adic integers.
+\end{verbatim}
+\end{footnotesize}
+%\end{progverb}
+It describes all domains implementing the $p$-adic integers for a
+given integer $p$.
+
+\label{paramtyiso}
+\sloppy This is an example of a class of categories
+used quite frequently in {\sf Axiom}. The mathematical structures
+corresponding to the domains which belong to the category {\tt
+PAdicIntegerCategory(p)} are all isomorphic! The reason for
+introducing such a category seems to be the following. For different
+computations it is useful to have different representations of the
+$p$-adic integers in a system. %\fussy
+
+\label{secisomor}
+The occurrence of categories in which all members are isomorphic (seen
+as mathematical structures) is not limited to categories having
+elements as parameters at all. Examples of others are
+
+\begin{center} {\tt \begin{tabular}{l} UnivariatePolynomialCategory(R:
+Ring) \\ QuotientFieldCategory(D: IntegralDomain)\\
+UnivariateTaylorSeriesCategory(Coef)\\
+UnivariateLaurentSeriesCategory(Coef)\\
+SquareMatrixCategory(ndim,R,Row,Col) \end{tabular} }
+\end{center}
+
+However, the case of elements as parameters for categories --- which
+is claimed to be rare in \cite[p.~524]{Jenk92} --- seems to be
+restricted to such categories.\footnote{This was the result of an
+incomplete check of the source code of {\sf Axiom} by the author.}
+
+It seems to be useful to treat this class of type classes by a new
+concept and not only as a special case of the general one of type
+classes. The reason is the following: Formally, these type classes
+correspond exactly to the concept of abstract data type in the sense
+of algebraic specification as is e.\,g.\ defined by Wirsing
+\cite{Wirs91}. Since the initial and the loose semantics
+coincide\footnote{We will assume that there are only at most countable
+structures as members of a certain class. Most properties we are
+interested in are still valid if we look at the subclasses of classes
+which consist of at most countable structures, cf.\ \cite{Hodg95}.}
+the distinction between first-order and second-order types becomes a
+problem. However, such a distinction is very desirable, as we will
+show below.
+
+\subsubsection{Sequences}
+\label{chapseq}
+
+In {\sf Axiom} the operator $\tf{map}$ is defined by a simple
+overloading for several cases, such as matrices, vectors, quotient
+fields, \ldots
+
+Using a parameterized type constructor $\tf{sequence}$ as in
+\cite{Chen92} this form of ad-hoc polymorphism in {\sf Axiom}
+could be changed to a form of type-class polymorphism. A
+parameterized category such as $\tf{HomogeneousAggregate}$ of the
+``data structure hierarchy'' of {\sf Axiom} seems to have almost the
+same intended meaning as $\tf{sequence}$. So it seems to be possible
+even in {\sf Axiom} to define $\tf{map}$ in
+$\tf{HomogeneousAggregate}$ and to have the algebraic examples as
+instances. In Sec.~\ref{s43} we will use this view in order to show
+that many coercions will fulfill a condition that leads to a coherent
+type system.
+
+\subsubsection{Type Inference}
+
+In \cite{Chen92} an extension of the type system of {\sf Haskell}
+is given allowing {\em types} as arguments in type classes. It is
+then proved that the type inference problem for parameterized type
+classes is decidable.
+
+As we have argued above a restriction of category constructors to have
+domains as parameters only in {\sf Axiom} does not seem to be a severe
+restriction for the type system of {\sf Axiom}. In
+Sec.~\ref{undetychtydeel} we will show that not only type inference
+but even type checking for a system having types depending on elements
+is undecidable. The proof of undecidability given there can be easily
+applied to the case of categories having elements as parameters. So
+it seems to be useful not to allow elements as parameters for category
+constructors.
+
+A certain problem in the proof given in \cite{Chen92} is that an
+entirely new technique is used which cannot be seen as an extension of
+the approach of Nipkow and Snelting using order-sorted unification.
+However, such an extension would be desirable. Since we have to add
+other typing constructs to the language, it is desirable to have a
+well understood theory behind one aspect of the typing problem instead
+of using ad-hoc approaches.
+
+Smolka \cite{Smol88}, \cite{Smol89a} extends the framework of
+order-sorted algebras by introducing functions having sorts as
+parameters. So if we were looking at category constructors which take
+categories as arguments we could directly apply the results of Smolka.
+However, it is not clear whether these results are also useful for the
+cases we are interested in.
+
+\subsubsection{Algebraic Specifications of Parameterized Type Classes}
+
+As in the case of type classes, any specifications using the loose
+approach can be seen as specifications of parameterized type classes.
+In the survey of Wirsing \cite{Wirs91} the relevant literature is
+cited. Especially, in \cite{Wirs82} the important {\em pushout
+construction} for parameterized specifications has been studied.
+
+\subsection{Type Classes as First-Order Types}
+
+Categories in the type system of {\sf Axiom} resp.\ type classes in
+the one of {\sf Haskell} are second-order types.
+
+By our general assumption first-order types have to correspond to
+structures in the sense of model theory or universal algebra.
+
+We will briefly discuss to what extent this assumption is justified in
+various areas.
+
+\subsubsection{Group Theory}
+\label{sgroupth}
+
+As the {\sf Axiom} library shows the assumption of types corresponding
+to mathematical structures makes good sense for many objects of
+computer algebra with the exception of group theory programs. In a
+group theory program many algorithms take certain groups as input and
+return other groups --- very often subgroups --- as output. So it is
+reasonable to have the groups an algorithm works on as objects and not
+as types in a program. In these cases it seems to be more natural to
+treat certain classes of groups, such as the finitely presented
+groups, as a type, and not the groups themselves. Many of the
+algorithms of group theory depend on such a view of groups as objects.
+In this way groups are implemented in the group theory program
+{\sf GAP} \cite{GAPx17}.
+
+Some group theoretical functions can be found in general purpose
+computer algebra programs such as {\sf MAPLE} (see e.\,g.\
+\cite[Sec.~4.2]{Char91a}) or {\sf Axiom} (see e.\,g.\
+\cite[App.~E]{Jenk92}). However, these are rather limited in power
+and coverage compared to the special group theory programs which have
+been developed in the last years ({\sf Cayley} \cite{Butl90},
+{\sf GAP} \cite{GAPx17}).
+
+The observation above shows that it is difficult to come up with a
+design which can really integrate group theoretical algorithms and the
+ones of other areas of computer algebra. This problem can even be
+seen within {\sf Axiom}. For instance, there are domains of
+permutation groups defined in {\sf Axiom}. However, these domains are
+not members of the {\sf Axiom} category $\cf{group}$!
+
+On the other hand it would be very desirable if some results of such
+group theoretic computations can be seen as types for other
+computations --- such as the group of integers $\langle \ZZ, +
+\rangle$ or the finite cyclic groups $\langle \ZZ_m, + \rangle$.
+
+Of course, if types become objects, then second-order types become
+first-order types. Nevertheless, the problem which has to be solved
+is that of the relationship between objects and types, and not that of
+the relationship between types and type classes!\footnote{See
+Chapter~\ref{chtydeel} of this thesis for further discussions.}
+
+\subsubsection{Requirements of a System}
+
+If types are structures, then the type classes correspond to model
+classes of certain theories. Can we assume that such model classes do
+not appear as objects we will deal with?
+
+Of course, as we have shown it makes good sense to view a type class
+as an algebraic object, namely the free term-algebra of order-sorted
+terms of the sort of the type class.
+
+However, even if we model those order-sorted algebras within our
+system there is no need to view type classes as first-order types, as
+long as we use ``isomorphic copies'' of them. So we can even write
+e.\,g.\ a compiler or a type inference algorithm in our system using
+functions defined for those algebras.
+
+The only thing we cannot model in a type-safe way are ``run-time'' interactions
+between such a compiler and an algebraic algorithm. But having
+systems which use self-modifying code is anyway contradicting the
+software-engineering principles we want to support by a type system.
+
+As we have shown in Sec.~\ref{secisomor} there are several type
+classes whose members are all isomorphic. For reasons of efficiency
+it is certainly necessary to distinguish these different members and
+to provide different type constructors for them, such as having a type
+constructor for univariate polynomials in sparse representation and
+another one for univariate polynomials in dense representation.
+
+However, it might be useful on the level of a user interface to have
+only a {\em type constructor} ``univariate polynomial'' available for
+the user without forcing him to choose a particular
+representation.\footnote{Contrary to a person implementing algorithms
+a user may be uncertain about the advantages of a particular
+representation so that the choice by the system might be better than
+the one of the user.} In this case a {\em category constructor}
+univariate polynomial would become a {\em type constructor} inducing
+that certain type classes become first-order types.
+
+Nevertheless, this seems to be useful only on the level of a user
+interface and seems to be restricted to cases in which the isomorphism
+between the types can be implemented in the system. Since such
+categories can be seen as (finite) equivalence classes in the coercion
+preorder (cf.\ Sec.~\ref{chtyiso}), these equivalence classes could be
+easily implemented by a new special concept. Then there would still be
+a clear distinction between first-order types (which would include the
+constructs describing the equivalence classes) and the second-order
+types of type classes.
+
+\subsubsection{Universal Algebra}
+
+In universal algebra, there are constructions which would imply the
+view of type classes as first-order objects. Namely, as in
+\cite[Sec.~24]{Monk76}, one can construct for a class {\bf K} of
+algebras the class {\bf S\,K} of substructures, or the class {\bf
+P\,K} of products or the class {\bf H\,K} of homomorphic images of
+{\bf K}.\footnote{More precisely, the class of structures which are
+{\em isomorphic} to substructures (or products, or homomorphic images)
+of elements of {\bf K}.} Then many theorems can be stated as an
+equation, e.\,g.\ Birkhoff's theorem has the form $$\mbox{{\bf K} is a
+variety iff } {\bf K} = {\bf HSP\,K}.$$ Although such a formulation is
+certainly elegant, it does not seem to be really necessary. So the
+additional difficulties which arise if one has to allow that type
+classes are members of the ``equality type class'' do not seem to be
+justified by the practical importance of such a construction.
+
+In model theory the possibility of imposing an algebraic structure ---
+e.\,g.\ the Lindenbaum algebra --- or a topological structure on sets
+of formulas is used frequently. Via the correspondence between sets of
+formulas and model classes such a structure can also be imposed on a
+model class making it an algebra or a topological space. However,
+since the properties on the side of the set of formulas are more
+useful people work with them and not with the model classes. Many
+books on model theory can serve as references for these remarks, some
+comprehensive ones are \cite{Chan90}, \cite{Poiz85}.
+
+\subsubsection{Category Theory}
+
+The situation is different for category theory. An important tool for
+category theory is the possibility to have a category of all
+(small)\footnote{Small means that the categories are sets in a set
+theory and not proper classes.} categories as objects and the functors
+as arrows, or having functor categories, etc.
+
+
+In this case it is not possible to have a perfect correspondence
+between types and type classes in our system and the objects of
+category theory. More generally, it is not possible to have such a
+perfect correspondence between the concepts of category theory and a
+{\em predicative}\footnote{The word ``predicative'' refers to the fact
+that a universe of types is introduced only after all of its members
+are introduced.} type-theory such as Martin-L\"of's type theory
+\cite{Mart80}, as is also discussed in \cite[Sec.~10]{Ryde88}.
+This is certainly a problem since impredicative
+type theories might have unwanted properties.
+Impredicative variants of Martin-L\"of's system
+can have an undesirable computational behavior,
+as is discussed e.\,g.\ in \cite{Meye86},
+\cite{Howe87}, \cite{Coqu86}.\footnote{This problem
+is discussed in the literature under the names
+{\em ``Type: Type''} --- referring to the problem
+whether the collection of all types is a type ---
+or {\em Girard's Paradox}, since Girard has shown in his thesis
+\cite{Gira72} that the original version of Martin-L\"of's type theory
+allowing such constructs is inconsistent with intuitionistic
+mathematics which it was supposed to model.}
+
+So it might be preferable to have a type system which allows some
+modeling of category theory but not a perfect correspondence.
+
+\subsubsection{Bounded Polymorphism}
+
+So in the main area of computer algebra there seems to be no need for
+a concept of type classes as first-order types. So we will only sketch
+some language proposals in which such a concept could be modeled. The
+main idea is to have first-order types as ``bounds'' to polymorphic
+constructs.
+
+The notion of {\em bounded quantification} was introduced by Cardelli
+and Wegner \cite{Card85} in the language Fun. This proposed
+language integrated Girard-Reynolds polymorphism \cite{Gira72},
+\cite{Reyn74} with Cardelli's first-order calculus of subtyping
+\cite{Card88}.
+
+\begin{remark}
+The so called ``second-order polymorphic $\lambda$-calculus'' was
+rediscovered independently by Reynolds \cite{Reyn74} as a formalism to
+express ``polymorphism'' in programming languages. Girard has
+introduced his system $F$ as a proof theoretic tool to give a
+consistency proof for second-order Peano arithmetic along a line of
+proof theoretic research which has originated with G\"odel
+\cite{Gode58}. A proof that all $\lambda$-terms typeable in system
+$F$ are strongly normalizable and that this theorem implies the
+consistency of second-order Peano arithmetic can be found in the book
+by Girard, et~al.\ \cite{Gira89}.
+\end{remark}
+
+Fun and its relatives have been studied extensively by programming
+language theorists and designers. A slight modification of this
+language --- called {\em minimal Bounded Fun} or $F_\leq$ --- by
+Curien and Ghelli was extensively studied by Pierce in his thesis
+\cite{Pier91}. Unfortunately, the type checking problem for this
+language was proven to be undecidable by Pierce \cite{Pier91},
+\cite{Pier91a}.
+
+Syntactically, types can have the form
+$$\forall \alpha \leq \sigma_1 \, . \, \sigma_2,$$
+where $\alpha$ is a type variable and $\sigma_1$ and $\sigma_2$ are types.
+Besides the usual rules asserting reflexivity and transitivity of
+$\leq$ the following rule is essential:\footnote{For a detailed
+discussion of the rules we refer to the thesis of Pierce
+\cite{Pier91}.}
+$$\frac{\Gamma \vdash \tau_1 \leq \sigma_1
+\quad\quad
+\Gamma, \alpha \leq \tau_1 \vdash \sigma_2 \leq \tau_2
+}{\Gamma \vdash \forall \alpha \leq \sigma_1 \, . \, \sigma_2 \:
+\leq \:
+\forall \alpha \leq \tau_1 \, . \, \tau_2
+} \eqno\mbox{({\sc Sub-All})}$$
+The expressiveness of the language\footnote{Since type checking is
+undecidable, it might be too expressive.} comes from the fact that
+first-order types are bounds for type variables. The rule
+$$x \in V_{\sigma'} \mbox{ and }\sigma' \leq \sigma
+\: \Longrightarrow \: x \in T_\Sigma(V)_\sigma$$
+constituting a part of the definition of order-sorted terms (cf.\
+Def. 8) can be seen as a special form of rule ({\sc
+Sub-All}) if one would restrict the system $F_\leq$ to cases which
+distinguish between two kinds of types where only one kind is allowed
+to be a bound. The typing rules for Mini-Haskell (cf.\
+Fig.~\ref{figtyns}) could be simulated by the typing rules for
+$F_\leq$ using a similar distinction between types.
+
+We will not develop a formal interpretation of Mini-Haskell in
+$F_\leq$ which could be done along the lines sketched above because it
+is not clear yet whether the additional expressiveness of $F_\leq$ is
+useful for a computer algebra system or an extension by another system
+would be more appropriate.
+
+{\bf Relation to Object-Oriented Programming}
+
+There has been a lot of work in the last years to show how the notions
+of {\em object-oriented programming}\footnote{Some books on
+object-oriented programming and languages are \cite{Meye88},
+\cite{Gold83}, \cite{Kirk89}, \cite{Birt80},
+\cite{Stro95}.} can be modeled in a type safe way by using
+$F_\leq$ or a related system like the so called $F$-bounded
+polymorphic second-order lambda calculus \cite{Cann89}. Some
+experimental languages based on such principles are {\sf TOOPL}
+\cite{Bruc93} and {\sf Quest} \cite{Card91}.
+
+As is argued e.\,g.\ in \cite{Limo92}, \cite{Temp92} and can
+be seen by a language for symbolic computation as {\sf VIEWS}
+\cite{Abda86} the principles of object-oriented programming
+are important tools for the design of a computer algebra system.
+
+However, as we have shown in Sec.~\ref{oopfod} and is discussed in
+more detail in \cite{Huda92}, \cite{Berg92} some important principles
+of object-oriented programming already come with the use of type classes.
+
+There are some examples --- e.\,g.\ ones related to problems of strict
+versus non-strict inheritance (see e.\,g.\ \cite{Limo92}, \cite{Temp92})
+--- which
+cannot be expressed in the type system of {\sf Axiom} and which could
+be expressed using more sophisticated techniques of object-oriented
+programming. However, as we will show in Sec.~\ref{coerinstr} there
+are properties of a type system which cannot be expressed by
+mechanisms of object-oriented programming alone but require an
+additional concept. So it may be preferable to use a system which is
+as simple as possible, even if not every example can be expressed in
+it.\footnote{There seems to be one single example which is used by
+several authors --- e.\,g.\ in \cite{Limo92} and in \cite{Baum95} ---
+implying the need of non-strict inheritance in a computer algebra
+system!}
+
+\section{Coercions}
+\label{chapcoer}
+
+In mathematics the convention to identify an object with its image
+under an embedding is used frequently. It is certainly one of the sources
+of strength of mathematical notation. Very often certain structures
+are constructed as being of quite different shape and then this
+convention is used to identify one with a certain subset of another
+one. Some examples which are explained in many textbooks are the
+``subset relationship''
+$$\NN\subseteq\ZZ\subseteq\QQ\subseteq\RR\subseteq\CC,$$ embeddings of
+elements of $\QQ$ in algebraic extensions of $\QQ$ or in a $p$-adic
+completion, or the embeddings of elements of a commutative ring $R$ in
+$R[x]$, \ldots
+
+If these mathematical structures correspond to types in a system and
+the embeddings are computable functions, then this convention can be
+modeled by the use of {\em coercions}.
+
+While the use of implicit conversions instead of explicit conversions
+might be debatable for parts of a system in which new efficient
+algorithms have to be written, it is certainly necessary for a user
+interface.
+
+\subsection{General Remarks}
+
+We will assume that we have a mechanism to declare some functions
+between types to be {\em implicit coercions\/} between these types (or
+simply {\em coercions}). If there is a coercion $\phi: t_1
+\longrightarrow t_2$ we will write $t_1 \subtype t_2$.
+
+\begin{remark} The requirement of set theoretic ground types and
+coercion functions excludes some constructions --- if we gave all
+types the ``obvious'' set theoretic interpretation ---, as the one
+used in \cite[Lemma~2]{Mitc91}, which assumes a coercion from the
+space of functions $\tf{FS}(D,D)$ over some domain $D$ into this
+domain. Such coercions which correspond to certain constructions of
+models of the $\lambda$-calculus (see e.\,g.\ \cite{Bare84}) seem to
+be of theoretical interest only. At least for the purpose of a
+computer algebra system the requirement of set theoretic coercion
+functions does not seem to be a restriction at all!
+\end{remark}
+
+\subsection{Coherence}
+\label{seccoh}
+
+In a larger system, it is possible that there are different ways to
+have a coercion from one type into another. Following \cite{Brea91}
+and \cite{Reyn91} we will call a type system {\em coherent}, if the
+coercions are independent of the way they are deduced in the
+system.\footnote{Notice that the term ``coherence'' is used similarly
+in category theory (see e.\,g.\ \cite{Macl91}) but is used quite
+differently in connection with order-sorted algebras (e.\,g.\ in
+\cite{Wald92}, \cite{Gogu92}, \cite{Rect89}).}
+
+In the following we will look at different kinds of coercions which
+occur and we will state some conditions which will yield the coherence
+of the system. Besides the technical proof of the coherence theorem
+we will give some informal discussions about the significance of these
+conditions.
+
+\subsubsection{Motivating Examples}
+
+Consider the expression $$ \tf{t} - \left( \begin{array}{cc} 1 & 0 \\
+3 & \frac{1}{2} \end{array} \right) $$ which --- as a mathematician
+would conclude --- denotes a $2 \times 2$-matrix over $\QQ[\tf{t}]$
+where $\tf{t}$ is the usual shorthand for $\tf{t}$ times the identity
+matrix. In an {\sf Axiom} like type system, this expression involves
+the following types and type constructors: The integral domain \tf{I}
+of integers, the unary type constructor \tf{FF} which forms the
+quotient field of an integral domain, the binary type constructor
+\tf{UP} which forms the ring of univariate polynomials over some ring
+in a specified indeterminate, and the type constructor $\tf{M}_{2,2}$
+building the $2 \times 2$-matrices over a commutative ring.
+
+
+In order to type this expression correctly several of the following
+coercions have to be used.
+
+\begin{center} \xext=1600 \yext=1200
+\begin{picture}(\xext,\yext)(\xoff,\yoff) \resetparms
+\setsqparms[1`-1`-1`1;1100`700]
+\putsquare(0,0)[\tf{UP}(\tf{I},\tf{\tf{t}})`\tf{UP}(\tf{FF}(\tf{I}),\tf{\tf{t}})
+ `\tf{I}`\tf{FF}(\tf{I});```]
+\putsquare(400,400)[\tf{M}_{2,2}(\tf{UP}(\tf{I},\tf{\tf{t}}))
+`\tf{M}_{2,2}(\tf{UP}(\tf{FF}(\tf{I}),\tf{\tf{t}}))
+`\tf{M}_{2,2}(\tf{I})`\tf{M}_{2,2}(\tf{FF}(\tf{I}));```]
+\putmorphism(110,110)(1,1)[``]{140}1b
+\putmorphism(110,810)(1,1)[``]{140}1b
+\putmorphism(1210,110)(1,1)[``]{140}1b
+\putmorphism(1210,810)(1,1)[``]{140}1b
+\end{picture}
+\end{center}
+
+There are different ways to coerce $\tf{I}$ to
+$\tf{M}_{2,2}(\tf{UP}(\tf{FF}(\tf{I}),\tf{\tf{t}}))$. Of course one
+wants the embedding of $\tf{I}$ in
+$\tf{M}_{2,2}(\tf{UP}(\tf{FF}(\tf{I}),\tf{\tf{t}}))$ to be independent
+of the particular choice of the coercion functions.
+
+In this example this independence seems to be the case, but how can we
+{\em prove} it? Moreover, not all coercions which would be desirable
+for a user share this property. Consider e.\,g.\ the binary type
+constructor ``direct sum'' $\oplus$ defined for Abelian groups. One
+could coerce $A$ into $A \oplus B$ via a coercion $\phi_1$ and $B$
+into $A \oplus B$ via a coercion $\phi_2$. But then the image of $A$
+in $A \oplus A$ depends on the choice of the coercion function!
+
+\subsubsection{Definition}
+
+
+Relying on the set theoretic semantics for our types and coercion
+functions we can give the following definition of coherence.
+
+{\bf Definition 20. (Coherence)}
+\index{coherence|ii}
+\label{defcoh}
+{\sl A type system is {\em coherent} if the following condition is satisfied:
+
+\begin{itemize} \item[] For any ground types $t_1$ and $t_2$ of the
+type system, if $\phi,\psi: t_1 \longrightarrow t_2$ are coercions
+then $\phi=\psi$.
+\end{itemize}}
+
+\subsubsection{General Assumptions}
+
+It will be convenient to declare each identity function on a type to
+be an implicit coercion.
+
+{\bf Assumption 1.}
+\label{A1}
+{\sl For any ground type $t$ the identity on
+$t$ will be a coercion. If $\phi: t_1 \longrightarrow t_2$ and $\psi:
+t_2 \longrightarrow t_3$ are coercions, then the composition $\phi
+\circ \psi : t_1 \longrightarrow t_3$ of $\phi$ and $\psi$ is a
+coercion.}
+
+{\bf Lemma 6.}
+\label{lem1}
+{\sl If assumption 1 holds, then the set of ground types as objects
+together with the coercion functions as arrows form a category.}
+
+\begin{proof}
+Since composition of functions is associative and the identity
+function is a coercion, all axioms of a category are fulfilled.\qed
+\end{proof}
+
+In the following we will always assume that assumption 1 holds
+even if we do not mention it explicitly.
+
+\subsubsection{Base Types}
+
+It is a good instrument for structuring data types to have only as few
+types as possible as base types but to construct them by a type
+constructor whenever possible.\footnote{As an example consider the
+field of rational numbers, which can be constructed as the quotient
+field of the integers.}
+
+Since there are only very few coercions between base types the
+following assumption seems to be easily satisfiable.
+
+{\bf Assumption 2. (Base Types)}
+\label{abasety}
+The subcategory of base types and coercions between base types forms a
+preorder, i.\,e.\ if $t_1$ and $t_2$ are base types and $\phi,\psi:
+t_1 \longrightarrow t_2$ are coercions then $\phi=\psi$.
+
+\subsubsection{Structural Coercions}
+
+{\bf Definition 21. (Structural Coercions)}
+{\sl The $n$-ary type
+constructor ($n \geq 1$) $f$ induces a {\em structural coercion\/}, if
+there are sets ${\cal A}_f \subseteq \{1, \ldots, n \}$ and ${\cal
+M}_f \subseteq \{1, \ldots, n \}$ such that \index{ Af@${\cal
+A}_f$|ii}\index{ Mf@${\cal M}_f$|ii} the following condition is
+satisfied:}
+
+{\sl Whenever there are declarations $f: (\sigma_1 \cdots \sigma_n)\sigma$
+and $f: (\sigma'_1 \cdots \sigma'_n)\sigma'$ and ground types
+$t_1:\sigma_1, \ldots, t_n:\sigma_n$ and $t'_1:\sigma'_1, \ldots,
+t'_n:\sigma'_n$ such that $t_i=t'_i$ if $i \notin {\cal A}_f \cup
+{\cal M}_f$ and there are coercions $$\begin{array}{ll} \phi_i: t_i
+\longrightarrow t'_i, & \mbox{if }i \in {\cal M}_f,\\ \phi_i: t'_i
+\longrightarrow t_i, & \mbox{if }i \in {\cal A}_f, \\ \phi_i = {\rm
+id}_{t_i} = {\rm id}_{t'_i} , & \mbox{if }i \notin {\cal A}_f \cup
+{\cal M}_f, \end{array}$$ then there is a {\em uniquely defined\/}
+coercion }
+$${\cal F}_f(t_1,\ldots,t_n,t'_1, \ldots, t'_n, \phi_1,
+\ldots, \phi_n) : f(t_1,\ldots,t_n) \longrightarrow
+f(t'_1,\ldots,t'_n).$$
+
+{\sl The type constructor $f$ is {\em covariant in its $i$-th argument}, if
+$i \in {\cal M}_f$. \index{covariant!type constructor|ii}\index{type
+constructor!covariant|ii} It is {\em contravariant in its $i$-th
+argument}, if $i \in {\cal A}_f$. \index{contravariant!type
+constructor|ii} \index{type constructor!contravariant|ii}}
+
+Instead of the adjective ``covariant'' we will sometimes use the
+adjective ``monotonic'', and instead of ``contravariant'' we will
+sometimes use ``antimonotonic'', because both terminologies are used
+in the literature and reflect different intuitions which are useful in
+different contexts.
+
+{\bf Assumption 3. (Structural Coercions)}
+\label{Astruct}
+{\sl Let $f$ be an $n$-ary type constructor which induces a structural
+coercion and let $f(t_1,\ldots,t_n)$, $f(t'_1,\ldots,t'_n)$, and
+$f(t''_1,\ldots,t''_n)$ be ground types. Assume that}
+$$
+\begin{array}{ll} t_i \subtype t'_i \subtype t''_i, & \mbox{if }i
+\in {\cal M}_f,\\ t''_i \subtype t'_i \subtype t_i, & \mbox{if }i \in
+{\cal A}_f, \\ t_i = t'_i=t''_i, & \mbox{if }i \notin {\cal A}_f \cup
+{\cal M}_f.
+\end{array}$$
+{\sl and let $\phi_i : t_i \longrightarrow
+t'_i$, $\phi'_i : t'_i \longrightarrow t''_i$ (if $i \in {\cal M}_f$),
+and $\phi'_i : t''_i \longrightarrow t'_i$, $\phi_i : t'_i
+\longrightarrow t_i$ (if $i \in {\cal A}_f$) be coercion functions.
+For $i \notin {\cal A}_f \cup {\cal M}_f$ let $\phi_i$ and $\phi'_i$
+be the appropriate identities.}
+
+{\sl Then the following conditions are satisfied:
+\begin{enumerate}
+\item ${\cal F}_f(t_1,\ldots,t_n,t_1, \ldots, t_n, {\rm id}_{t_1}, \ldots,
+{\rm id}_{t_n})$ is the identity on $f(t_1,\ldots,t_n)$,
+\item ${\cal F}_f(t_1,\ldots,t_n,t''_1, \ldots, t''_n, \phi_1 \circ \phi'_1,
+\ldots, \phi_n \circ \phi'_n) =$ \\ ${\cal F}_f(t_1,\ldots,t_n,t'_1,
+\ldots, t'_n, \phi_1, \ldots, \phi_n) \circ {\cal
+F}_f(t'_1,\ldots,t'_n,t''_1, \ldots, t''_n, \phi'_1, \ldots, \phi'_n).$
+\end{enumerate} }
+
+Let $f: (\sigma_1 \cdots \sigma_n) \sigma$ be an $n$-ary type
+constructor which induces a structural coercion.
+\label{defcatsigmai}
+Let $\cat{C}_{\sigma_i}$ be the category of ground types of sort
+$\sigma_i$ as objects and the coercions as arrows, let
+$\cat{C}_{\sigma_i}^{\rm op}$ be the dual category of
+$\cat{C}_{\sigma_i}$ and let $\cat{C}_{\sigma_i}^{\rm triv}$ be the
+discrete subcategory of the objects of $\cat{C}_{\sigma_i}$. Define
+$$ \cat{C}_i=\left\{ \begin{array}{ll} \cat{C}_{\sigma_i}, & \mbox{if
+}i \in {\cal M}_f,\\ \cat{C}_{\sigma_i}^{\rm op}, & \mbox{if }i \in
+{\cal A}_f,\\ \cat{C}_{\sigma_i}^{\rm triv}, & \mbox{if }i \notin
+{\cal A}_f \cup {\cal M}_f. \end{array} \right. $$ Then
+assumption 3 means that the mapping assigning $f(t_1,
+\ldots, t_n)$ to the $n$-tuple $(t_1, \ldots, t_n)$ and assigning the
+coercion $${\cal F}_f(t_1,\ldots,t_n,t'_1, \ldots, t'_n, \phi_1,
+\ldots, \phi_n)$$ to the $n$-tuple $(\phi_1, \ldots, \phi_n)$ of
+coercions is a {\em functor} from $$\cat{C}_1 \times \cdots \times
+\cat{C}_n$$ into $\cat{C}_\sigma$.
+
+\label{s43}
+Typical examples of type constructors which induce a
+structural coercion are \tf{list}, \tf{UP}, $\tf{M}_{n,n}$, $\tf{FF}$.
+These examples give rise to structural coercions, because the
+constructed type can be seen as an instance of the parameterized type
+class $\tf{sequence}$ (cf.\ Sec.~\ref{chapseq}).\footnote{The
+sequences can be of fixed finite length, as in the case $\tf{FF}$
+where it consists of two elements only, the numerator and the
+denominator.} The coercions between the constructed types are then
+obtained by {\em mapping} the coercions between the type parameter
+into the sequence. Since a mapping of functions distributes with
+function composition, assumption 3 will be satisfied by
+these examples.
+
+Although many examples of structural coercions satisfying
+assumption 3 can be explained by this mechanism, there are
+others, which will satisfy assumption 3 because of another
+reason, so that the more general framework we have chosen is
+justified. For instance, it is another mechanism which gives rise to
+the structural coercion in the case of the ``function space'' type
+constructor, as is well known.\footnote{See e.\,g.\
+\cite{Card86}.} It is contravariant in its first argument and
+covariant in its second argument, as the following considerations
+show: Let $A$ and $B$ be two types where there is an implicit coercion
+$\phi$ from $A$ to $B$. If $f$ is a function from $B$ into a type
+$C$, then $f \circ \phi $ is a function from $A$ into $C$. Thus any
+function from $B$ into $C$ can be coerced into a function from $A$
+into $C$. Thus an implicit coercion from $\tf{FS}(B,C)$ into
+$\tf{FS}(A,C)$ can be defined, i.\,e.\ $\tf{FS}(B,C) \subtype
+\tf{FS}(A,C)$. If $C \subtype D$ by an implicit coercion $\psi$, then
+$\psi \circ f$ is a function from $A$ into $D$, i.\,e.\ an implicit
+coercion from $\tf{FS}(A,C)$ into $\tf{FS}(A,D)$ can be defined. In
+this case assumption 3 is satisfied because of the
+associativity of function-composition.
+
+Although many important type constructors arising in computer algebra
+are monotonic in all arguments it is not justified to assume that this
+property will always hold as was done in \cite{Como91}. We
+have already seen that the type constructor for building ``function
+spaces'' is antimonotonic in its first argument. Constructions like
+the fixpoint field of a certain algebraic extension of $\QQ$ under a
+group of automorphisms in Galois theory (see e.\,g.\
+\cite{Zari75}, \cite{Marc77}, \cite{Lang05}) would give
+other --- more algebraic --- examples of type constructors which are
+antimonotonic.\footnote{In {\sf GAP} \cite{GAPx17} such constructs
+are implemented as functions and not as type constructors, cf.\ the
+discussion in Sec.~\ref{sgroupth}. Nevertheless, the implementation
+as type constructors seems to be a reasonable possibility.}
+
+However, an assumption that all type constructors are monotonic or
+antimonotonic in all arguments as in \cite{Fuhx90}, \cite{Mitc91}
+still seems to be too restrictive for our purposes.
+
+If one allows a type constructor building references (pointers) to
+objects of a certain type as is possible in Standard ML or in the
+system described by Kaes \cite{Kaes92}, then this type constructor is
+neither monotonic nor antimonotonic.
+
+There are also algebraic examples of type constructors which are
+neither monotonic nor antimonotonic. Consider e.\,g.\ the quotient
+groups $G/G'$, where $G'$ is the derived subgroup of $G$ (see e.\,g.\
+\cite[p.~28]{Robi96}). Assume that $H$ can be embedded in
+$G$. Then in general it is not possible to embed $H/H'$ in $G/G'$ or
+vice versa. Thus if one would have a type constructor building the
+type $G/G'$ for a given group $G$, then this type constructor would be
+neither monotonic nor antimonotonic.
+
+\begin{remark}
+Of course, one has to restrict the groups in consideration to ones for
+which the construction of $G/G'$ can be performed effectively. One
+such class of groups is that of the finite polycyclic groups
+(cf. \cite{GAPx17}).
+\end{remark}
+
+\subsubsection{Direct Embeddings in Type Constructors}
+
+{\bf Definition 22. (Direct Embeddings)}
+\label{defdiem}
+\index{direct embedding|ii}
+\index{embedding!direct|ii}
+{\sl Let $f:(\sigma_1, \ldots, \sigma_n)\sigma$ be an $n$-ary type
+constructor. If for some ground types $t_1:\sigma_1, \ldots,
+t_n:\sigma_n$ there is a coercion
+function $$\Phi^{i}_{f,t_1,\ldots,t_n}: t_i \longrightarrow
+f(t_1,\ldots,t_n),$$ then we say that {\em $f$ has a direct embedding
+at its $i$-th position}.}
+
+{\sl Moreover, let $${\cal D}_f= \{i \mid \mbox{$f$ has a direct embedding
+at its $i$-th position}\}$$ \index{ Df@${\cal D}_f$|ii} be the {\em
+set of direct embedding positions of $f$}.}
+
+\begin{remark}
+In {\sf Axiom} the inverses of direct embeddings are
+called {\em retractions} (cf.\ \cite[p.~713]{Jenk92}) assuming
+that the direct embeddings are always injective. Thus the usage of
+the term in {\sf Axiom} is a special case of our usage of that term,
+since in our terminology any partial function which is an inverse of
+any injective coercion can be a retraction.
+
+On the other hand the {\sf Axiom} terminology shows that the designers
+of {\sf Axiom} have seen the importance of direct embeddings, even if
+there is no special terminology for direct embeddings themselves but
+only for their inverses!
+\end{remark}
+
+\begin{remark}
+In a system, a type constructor represents a
+parameterized abstract data type which is usually built uniformly from
+its parameters. So the family of coercion functions
+$$\{\Phi^{i}_{f,t_1,\ldots,t_n} \mid t_i \in T_\Sigma(\{\})_{\sigma_i}
+\}$$ will very often be just one ({\em polymorphic\/}) function. In
+this respect the situation is similar to the one in Sec.~\ref{s43}.
+\end{remark}
+
+{\bf Assumption 4. (Direct Embeddings)}
+\label{Aemb}
+{\sl Let $f:(\sigma_1 \cdots \sigma_n)\sigma$ be an $n$-ary type constructor.
+
+Then the following conditions hold:
+\begin{enumerate}
+\item $|{\cal D}_f|\leq 1$.
+\item The coercion functions which give rise to the
+direct embedding are unique, i.\,e.\ if $\Phi^{i}_{f,t_1,\ldots,t_n}:
+t_i \longrightarrow f(t_1,\ldots,t_n)$ and
+$\Psi^{i}_{f,t_1,\ldots,t_n}: t_i \longrightarrow f(t_1,\ldots,t_n)$,
+then $$\Phi^{i}_{f,t_1,\ldots,t_n}=\Psi^{i}_{f,t_1,\ldots,t_n}.$$
+\end{enumerate} }
+
+Many important type constructors such as $\tf{list}$, $\tf{M}_{n,n}$,
+$\tf{FF}$, and in general the ones describing a ``closure'' or a
+``completion'' of a structure --- such as the $p$-adic completions or
+an algebraic closure of a field --- are unary. Since for unary type
+constructors the condition $|{\cal D}_f| \leq 1$ is trivial and the
+second condition in assumption 4 should be always fulfilled,
+the assumption holds in these cases.
+
+For $n$-ary type constructors ($n \geq 2$) the requirement $|{\cal
+D}_f| \leq 1$ might restrict the possible coercions. Consider the
+``direct sum'' type constructor for Abelian groups which, as we have
+already seen, could lead to a type system that is not coherent
+if we do not restrict the possible coercions. For a type constructor
+$$\oplus: (\cf{Abelian\_group} \; \cf{Abelian\_group})
+\cf{Abelian\_group}$$ the requirement $|{\cal D}_f| \leq 1$ means that
+it is only possible to have either an embedding at the first position
+or at the second position.
+
+In the framework that we have used the types $A \oplus B$ and $B
+\oplus A$ will be different. However, the corresponding mathematical
+objects are {\em isomorphic}. Having a mechanism in a language that
+represents certain isomorphic mathematical objects by the same type
+(cf.\ Sec.~\ref{chtyiso}) the declaration of both natural embeddings
+to be coercions would not lead to an incoherent type system. Notice
+that such an additional mechanism, which corresponds to factoring the
+free term-algebra of types we regard by some congruence relation, will
+be a conservative extension for a coherent type system. If a type
+system was coherent, it will remain coherent. It is only possible that
+a type system being incoherent otherwise becomes coherent.
+
+Let $f:(\sigma \sigma')\sigma$ be a binary type constructor with
+$\sigma$ and $\sigma'$ incomparable having direct embeddings at the
+first and second position, and let $t : \sigma$ and $t' : \sigma'$ be
+ground types such that $$t' \subtype f(f(t,t'),t').$$ Then there are
+two possibilities to coerce $t'$ into $f(f(t,t'),t')$ which might be
+different in general. In the case of types $\tf{R} : \cf{c\_ring}$
+and $\tf{x} : \cf{symbol}$ the coercions of $\tf{x}$ into
+$\tf{UP}(\tf{UP}(\tf{R},\tf{x}),\tf{x})$ are unambiguous, if
+$\tf{UP}(\tf{UP}(\tf{R},\tf{x}),\tf{x})$ and $\tf{UP}(\tf{R},\tf{x})$
+are the same type. However, it does not seem to be generally possible
+to avoid the condition $|{\cal D}_f| \leq 1$ even in cases where a
+type constructor is defined for types belonging to incomparable type
+classes.
+
+The naturally occurring direct embeddings for types built by the type
+constructors $\tf{FF}$ and $\tf{UP}$ show that in the context of
+computer algebra there are cases in which a coercion is defined into a
+type belonging to an incomparable type class, into a type belonging to
+a more general type class, into a type belonging to a less general
+type class, or into a type belonging to the same type class. So
+coercions occur quite ``orthogonal'' to the inheritance hierarchy on
+the type classes showing an important difference between the coercions
+in computer algebra and the ``subtypes'' occurring in object oriented
+programming (cf.\ Sec.~\ref{coerinstr}).
+
+The next assumption will guarantee that structural coercions and
+direct embeddings will interchange nicely.
+
+{\bf Assumption 5. (Structural Coercions and Embeddings)}
+\label{Accemb}
+{\sl Let $f$ be an $n$-ary type constructor which induces a
+structural coercion and has a direct embedding at its $i$-th position.
+Assume that $f:(\sigma_1 \cdots \sigma_n)\sigma$ and $f:(\sigma'_1
+\cdots \sigma'_n)\sigma$, $t_1:\sigma_1, \ldots, t_n:\sigma_n$, and
+$t'_1:\sigma'_1, \ldots, t'_n:\sigma'_n$. If there are coercions
+$\psi_i: t_i \longrightarrow t'_i$, if the coercions
+$\Phi^{i}_{f,t_1,\ldots,t_n}$ and $\Phi^{i}_{f,t'_1,\ldots,t'_n}$ are
+defined, and if $f$ is covariant in its $i$-th argument, then the
+following diagram is commutative:}
+
+\begin{center}
+\setsqparms[1`1`1`1;2600`600]
+\square[t_i`t'_i`f(t_1,\ldots,t_n)`f(t'_1,\ldots,t'_n);
+\psi_i`
+{\Phi^{i}_{f,t_1,\ldots,t_n}}`
+{\Phi^{i}_{f,t'_1,\ldots,t'_n}}`
+{{\cal F}_f(t_1,\ldots,t_n,t'_1,\ldots,t'_n,
+\psi_1, \ldots, \psi_n)}]
+\end{center}
+
+{\sl If $f$ is contravariant in its $i$-th argument,
+then the following diagram is commutative:}
+\begin{center}
+\setsqparms[1`1`1`-1;2600`600]
+\square[t_i`t'_i`f(t_1,\ldots,t_n)`f(t'_1,\ldots,t'_n);
+\psi_i`
+{\Phi^{i}_{f,t_1,\ldots,t_n}}`
+{\Phi^{i}_{f,t'_1,\ldots,t'_n}}`
+{{\cal F}_f(t_1,\ldots,t_n,t'_1,\ldots,t'_n,
+\psi_1, \ldots, \psi_n)}]
+\end{center}
+
+The type constructors \tf{list}, \tf{UP}, $\tf{M}_{n,n}$ may serve as
+examples of constructors which induce structural coercions and can
+also have direct embeddings: It might be useful to have coercions from
+elements into one element lists, from elements of a ring into a
+constant polynomial or to identify a scalar with its multiple with the
+identity matrix.
+
+As was already discussed
+in Sec.~\ref{s43}, in all these examples the
+parameterized data types can be seen as sequences and the
+structural coercions ---
+i.\,e.\ ${\cal F}_\tf{UP}(\tf{I},\tf{x},
+\tf{FF}(\tf{I}),\tf{x},
+\psi, {\rm id}_{\tf{x}})$ ---
+can be seen as a kind of ``mapping'' operators.
+
+The direct embeddings are ``inclusions'' of
+elements in these sequences.
+Since applying a coercion function to such an element
+and then ``including'' the result in a sequence
+will yield the same result as first including
+the element in the sequence and then ``mapping'' the
+coercion function into the sequence,
+assumption 5 will be satisfied by these examples.
+For instance,
+$${\cal F}_\tf{UP}(\tf{I},\tf{x},\tf{FF}(\tf{I}),\tf{x},
+\Phi^{1}_{\tf{FF},\tf{I}}, {\rm id}_{\tf{x}})$$
+is the function which maps the coercion function
+$\Phi^{1}_{\tf{FF},\tf{I}}$
+to the sequence of elements of $\tf{I}$ in $\tf{UP}(\tf{I},\tf{x})$
+which represents the polynomial.
+
+Thus the diagrams
+\begin{center}
+\resetparms
+\setsqparms[1`1`1`1;1000`500]
+\square[\tf{I}`\tf{FF}(\tf{I})`
+\tf{UP}(\tf{I},\tf{\tf{t}})`\tf{UP}(\tf{FF}(\tf{I}),\tf{\tf{t}});```]
+\end{center}
+and
+\begin{center}
+\resetparms
+\setsqparms[1`1`1`1;1000`500]
+\square[\tf{I}`\tf{UP}(\tf{I},\tf{\tf{t}})`
+\tf{M}_{2,2}(\tf{I})`\tf{M}_{2,2}(\tf{UP}(\tf{I},\tf{\tf{t}}));```]
+\end{center}
+and
+\begin{center}
+\resetparms
+\setsqparms[1`1`1`1;1000`500]
+\square[\tf{I}`\tf{FF}(\tf{I})`
+\tf{M}_{2,2}(\tf{I})`\tf{M}_{2,2}(\tf{FF}(\tf{I}));```]
+\end{center}
+which are instances of the diagrams
+in assumption 5 are commutative.\footnote{The
+first of these diagrams can also be found in
+\cite{Fort90}.}
+
+\bigskip
+If the mathematical structure
+represented by a type $t_i$
+in assumption 5 has non-trivial
+automorphisms, then it is possible to
+construct the structural coercion
+$${{\cal F}_f(t_1,\ldots,t_n,t'_1,\ldots,t'_n,
+\psi_1, \ldots, \psi_n)}$$
+in a way such that the assumption is
+violated: just apply a non-trivial automorphism
+to $t_i$!
+However, such a construction seems to be artificial.
+Moreover, the argument shows that
+a possible violation of assumption 5
+``up to an automorphism'' can be avoided by an
+appropriate definition of
+$${{\cal F}_f(t_1,\ldots,t_n,t'_1,\ldots,t'_n,
+\psi_1, \ldots, \psi_n)}.$$
+
+\subsubsection{A Coherence Theorem}
+
+We are now ready to state the main result of this section.
+The assumptions 1, 2, 3, 4, and 5 are
+ ``local'' coherence conditions
+imposed on the coercions of the type system.
+In the following theorem we will prove
+that the type system is ``globally'' coherent,
+if these local conditions are satisfied.
+
+{\bf Theorem 6. (Coherence)}
+\label{thmain}
+{\sl Assume that all coercions between ground types
+are only built by one of the following mechanisms:
+\begin{enumerate}
+\item coercions between base types;
+\item coercions induced by structural coercions;
+\item direct embeddings in a type constructor;
+\item composition of coercions;
+\item identity function on ground types as coercions.
+\end{enumerate}
+If the assumptions 1, 2, 3, 4, and 5 are satisfied,
+then the set of ground types as objects and the coercions
+between them as arrows form a category which is a preorder.}
+
+\begin{proof}
+
+By assumption 1 and lemma 6 the
+set of ground types as objects and the coercions
+between them as arrows form a category.
+
+For any two ground types $t$ and $t'$ we will
+prove by induction on the complexity
+of $t'$ that if
+$\phi, \psi : t \longrightarrow t'$ are coercions
+then $\phi=\psi$ which will establish the theorem.
+
+If $\com(t')=1$ then we have $\com(t)=1$ because of
+the assumption on the possible mechanisms for building coercions.
+Since $\com(t)=1$ and $\com(t')=1$ the claim
+follows from assumption 2.
+
+Now assume that the induction hypothesis holds
+for $k$
+and let $\com(t')=k+1$.
+Thus we can assume that
+$t'=f(u_1,\ldots,u_n)$ for some $n$-ary type
+constructor $f$.
+
+Let $\phi, \psi : t \longrightarrow t'$ be coercions.
+
+
+The coercions $\phi$ and $\psi$ are
+compositions of coercions
+between base types, direct embeddings in type
+constructors and structural coercions.
+Because of assumption 3
+and the induction hypothesis we
+can assume that there are ground
+types $s_1$ and $s_2$ and
+unique coercions $\psi_1: t \longrightarrow s_1$
+and $\psi_2: t \longrightarrow s_2$
+such that
+\begin{equation}
+\label{e1}
+\phi = {\cal F}_f(\ldots, t, \ldots , s_1,
+\ldots , \psi_1, \ldots )
+\end{equation}
+or
+\begin{equation}
+\label{e2}
+\phi= \psi_1 \circ \Phi^i_{f, \ldots, s_1, \ldots}
+\end{equation}
+Similarly,
+\begin{equation}
+\label{e3}
+\psi = {\cal F}_f(\ldots, t, \ldots , s_2,
+\ldots , \psi_2, \ldots )
+\end{equation}
+or
+\begin{equation}
+\label{e4}
+\psi= \psi_2 \circ \Phi^j_{f, \ldots, s_2, \ldots}
+\end{equation}
+If $\phi$ is of form \ref{e1} and $\psi$ is
+of form \ref{e3}, then $\phi=\psi$
+because of assumption 3
+and the uniqueness of
+${\cal F}_f$.
+If $\phi$ is of form \ref{e2} and $\psi$ is
+of form \ref{e3}, then $\phi=\psi$
+because of assumption 5.
+Analogously for $\phi$ of form \ref{e1}
+and $\psi$ of form \ref{e4}.
+
+If $\phi$ is of form \ref{e2} and $\psi$ is of form \ref{e4}
+then assumption 4 implies that
+$i=j$ and $s_1=s_2$. Because of the induction
+hypothesis we have $\psi_1 = \psi_2$ and
+hence $\phi=\psi$ again by assumption 4.
+\qed
+\end{proof}
+
+\subsection{Type Isomorphisms}
+\label{chtyiso}
+
+In several important cases there is
+not only a coercion from a type
+$A$ into a type $B$ but also one
+from $B$ into $A$.
+So there are coercions from univariate polynomials
+in sparse representation over some ring
+to ones in dense representation and vice versa.
+Or we have
+$$\tf{FF}(t_\cf{integral\_domain}) \subtype
+\tf{FF}(\tf{FF}(t_\cf{integral\_domain}))$$
+and
+$$\tf{FF}(\tf{FF}(t_\cf{integral\_domain}))
+\subtype \tf{FF}(t_\cf{integral\_domain}).$$
+Other examples can be found in Sec.~\ref{paramtyiso}.
+If $A \subtype B$ and $B \subtype A$ then we will
+write $A \typeiso B$.
+
+If we require that for coercions
+$$\begin{array}{l}
+\phi: A \longrightarrow B, \\
+\psi: B \longrightarrow A
+\end{array}
+$$
+the compositions $\phi \circ \psi$ and $\psi \circ \phi$
+are the identities on $A$ resp.\ $B$, then
+the coherence theorem 6 can be extended
+to the case of type isomorphisms.\footnote{Obviously,
+the conditions that $\phi$ and $\psi$ are true inverses
+of each other is also a necessary condition
+for coherence.}
+
+So type isomorphisms can be seen as equivalence classes in the
+preorder on types induced by the coercions. However, there are
+several reasons to treat type isomorphisms by a new typing construct
+independent from the concept of coercions. As we have shown in
+Sec.~\ref{chparamtycl} there is usually the second-order type of a
+category present in {\sf Axiom} for a class of equivalent types. On
+the one hand if coercions are present in the system the equivalence
+classes in the coercion preorder can be deduced by a system so that it
+is not necessary to define them by the programmer.\footnote{In
+{\sf Axiom} the isomorphic types are treated independently of the
+ coercions.} On the other hand --- at least for the purpose of a
+user interface --- it seems to be useful to have a class of isomorphic
+types present as a first-order type. Since all equivalence classes in
+the coercion preorder are finite --- only finitely many (possibly
+polymorphic) functions can be defined to be coercions --- the type of
+finite disjoint unions --- variant record types --- can serve as a
+well known first-order type for that purpose
+(cf.\ \cite[p.~46]{That91}).
+
+Moreover, it is reasonable to assume that type isomorphisms have the
+following properties which cannot be deduced from the properties of
+general coercion functions.
+\begin{enumerate}
+\item Isomorphic types belong to the same type class, i.\,e.\ if $t_1
+ : \sigma$ and $t_1 \typeiso t_2$ then $t_2 : \sigma$.
+\item If $f: (\sigma_1 \cdots \sigma_n)\sigma$ is an $n$-ary type
+ constructor, $t_1:\sigma_1, \ldots, t_n:\sigma_n$, $t'_1:\sigma_1,
+ \ldots, t'_n:\sigma_n$, such that
+$$ t_i \typeiso t'_i \quad \forall i$$ then
+$$f(t_1, \ldots, t_n ) \typeiso f(t'_1, \ldots, t'_n ).$$
+\end{enumerate}
+
+The second condition is only implied by the rules for structural
+coercions if $f$ would be monotonic or antimonotonic in all arguments.
+Because of the second condition a {\em congruence relation} is defined
+by $\typeiso$ on the term-algebra of types.\footnote{It follows from
+the properties of $\subtype$ alone that $\typeiso$ defines an
+equivalence relation.} Thus we can build the factor algebra modulo
+this congruence relation. This factor algebra is isomorphic to the
+factor algebra modulo some equational theory, the equational theory
+which is obtained if we interpret $\typeiso$ as equality. We will
+call this equational theory {\em the equational theory corresponding
+to the type isomorphism.}
+
+For simplicity we will often neglect the sort constraints but will
+only write the unsorted part. Since for many examples in consideration
+the sort is always the same, this slightly sloppy view can be
+justified even formally.
+
+While it is useful to know that certain {\em different types} are
+isomorphic --- such as the sparse and dense representations of
+polynomials --- there are other cases where it seems to be more
+appropriate to have a semantics of the type system implying that
+certain types are actually {\em equal}.
+
+So the type system is not coherent if we define all naturally
+occurring embedding functions to be coercions and if we regard two
+types
+$$\tf{direct\_sum}(t_1,t_2) \mbox{ and } \tf{direct\_sum}(t_2,t_1)$$
+as being different. This example would not violate the coherence of
+the type system if we had not only two possible coercion functions
+implying that these types are isomorphic but if these types are
+actually {\em equal\/} in the system. Notice that an implementation
+of this type constructor having these properties is possible. One
+just has to use the same techniques as are used for the representation
+of general associative and commutative operators in certain
+term-rewriting systems (see e.\,g.\ \cite[Sec.~10]{Bund93},
+\cite{Bund93a}), i.\,e.\ a certain ordering on terms has to be
+given and the terms have to be represented in a {\em flattened form}.
+
+In Sec.~\ref{scoercprbl} we will give a family of type isomorphisms
+whose corresponding equational theory is not finitely axiomatizable.
+Thus all of these isomorphisms cannot be modeled by declaring finitely
+many functions to be coercions between types (even if we allow
+``polymorphic'' coercion functions between polymorphic types). So
+these type isomorphisms could be only modeled in the system by a
+direct mechanism implying that certain types are equal.
+
+\subsubsection{Independence of the Coercion Preorder
+from the Hierarchy of Type Classes}
+\label{coerinstr}
+
+If two types are isomorphic, then they belong to the same type class.
+
+Such a conclusion is not justified if there is only a coercion from
+$A$ into $B$. Consider for instance a field $K$. Its elements can be
+coerced to the constant polynomials in $K[x]$. Of course, the ring of
+polynomials over some field is no longer a field.
+
+However, it cannot be concluded in general that $A \subtype B$ and $A:
+\sigma$ implies $B: \tau$ for some $\sigma \leq \tau$. Just the
+opposite holds for many important examples!
+
+Consider e.\,g.\ the coercion from an integral domain into its field
+of fractions which is not only an integral domain but even a field.
+Similarly, any field can be embedded in its algebraic closure,
+i.\,e.\ in a structure which has additional ``nice'' properties,
+namely that it is an algebraically closed field. The constructions of
+the real numbers $\RR$ or of $p$-adic completions of $\QQ$ can be seen
+similarly. The field of rational numbers $\QQ$ can be embedded in
+these structures --- and is usually identified with its image under
+this embedding --- which are complete metric spaces, a property that
+the original structure did not have.
+
+The construction of structures which have additional ``nice''
+properties and in which the original structure can be embedded is an
+important tool for mathematical reasoning.\footnote{The author could
+ easily list several examples of such constructions from the area of
+ mathematics he has worked on. Since this area is non-constructive
+ we will omit them. However, it seems to be possible to find some
+ examples in almost {\em any} area of mathematics.} Usually, the
+original structures and their images under this embedding are not
+distinguished notationally.
+
+So the possibility to have coercions which induce a preorder on types
+that is quite independent of the preorder on types induced by the
+inheritance hierarchy on type classes seems to be important. Notice
+that these preorders would still differ even if we had allowed more
+sophisticated inheritance possibilities on type classes than the ones
+given in {\sf Axiom} or {\sf Haskell}. There have to be (at least)
+two hierarchies. The one corresponding to some form of
+``inheritance'': more special structures (such as a ``rings'') inherit
+all properties of more general ones (such as ``groups''), and another
+one reflecting possible embeddings of a structure into another that
+might have stronger properties.
+
+\begin{remark}
+Of course, it is desirable to have some form of control over the
+possibilities how coercions behave with respect to the hierarchy on
+type classes. This seems to be possible.
+
+All of the examples given above can be described by a unary type
+constructor $F$ such that for any types $A$ and $B$ of an appropriate
+sort the following holds:
+$$\begin{array}{l} \mbox{If }A \subtype B, \mbox{ then } F(A) \subtype
+ F(B),\\ A \subtype F(A), \\ F(F(A)) \typeiso F(A) .
+\end{array}$$
+Thus --- if we interpret $\subtype$ as $\subseteq$ and $\typeiso$
+as equality --- the type constructor $F$ has the properties of a
+{\em closure operator} (see e.\,g.\ \cite{Dave90},\cite{Laue82}).
+
+So the requirement that a unary type constructor which has a direct
+embedding and whose constructed type belongs to a type class with
+stronger properties than the type parameter has to be a closure
+operator in the sense of above would be fulfilled by many important
+examples. On the other hand such a restriction might allow much more
+efficient type inference algorithms so that it might be a reasonable
+requirement for a system.
+\end{remark}
+
+\subsubsection{Some Problematic Examples of Type Isomorphisms}
+
+In this section we will collect some natural examples of type
+isomorphisms which arise in the context of computer algebra. We will
+show that their corresponding equational theories are not unitary or
+even not finitary unifying or that the unification problem is even
+undecidable.
+
+In Sec.~\ref{sectypinfcoer} we will show why these properties of the
+corresponding equational theory are problematic in the context of type
+inference.
+
+We have already shown that a family of type isomorphisms whose
+corresponding equational theory is not finitely axiomatizable cannot
+be modeled by means of finitely many coercion functions and thus
+requires another concept. The presentation of a family of type
+isomorphisms having this property will be given in the next section
+because the proof of this property will need a little technical
+machinery.
+
+{\bf Example 1.}
+\label{isomac}
+As was mentioned above for the type constructor $\tf{direct\_sum}$ on
+Abelian groups the type isomorphisms
+$$\tf{direct\_sum}(t_1,t_2) \typeiso \tf{direct\_sum}(t_2,t_1),$$ and
+$$\tf{direct\_sum}(t_1,\tf{direct\_sum}(t_2,t_3)) \typeiso
+\tf{direct\_sum}(\tf{direct\_sum}(t_1,t_2),t_3)$$ hold.
+
+Thus $\tf{direct\_sum}$ would give rise to an equational theory modulo
+an associate and commutative operator. The unification problem for
+such an equational theory is decidable, but not unitary unifying.
+However, it is finitary unifying (cf.\ \cite{Siek89},
+\cite{Joua90}).
+
+{\bf Example 2.}
+\label{isomass}
+For the binary type constructor $\tf{pair}$ which builds the type of
+ordered pairs of elements of arbitrary types the following type
+isomorphisms hold:
+$$\tf{pair}(\tf{pair}(A,B),C) \typeiso \tf{pair}(A, \tf{pair}(B,C)),$$
+i.\,e.\ it corresponds to an associative equational theory.
+Unification for such theories is decidable but not finitary unifying
+\cite{Siek89}.
+
+{\bf Example 3.}
+\label{isounde}
+Let $A, B, C$ be vector spaces over some fixed field $K$ and let
+$\oplus$ denote the direct sum of vector spaces and $\otimes$ denote
+the tensor product of two vector spaces. Then we have
+$$ (A \oplus B) \otimes C \cong (A \otimes C) \oplus (B \otimes C)
+$$ (see e.\,g.\ \cite[p.~293]{Kowa63}.) Thus if we had two binary
+type constructors over vector spaces building direct sums and tensor
+products respectively, then the ``distributivity law'' gives rise to
+type isomorphisms. Since associativity and commutativity also hold
+for the type constructor building direct sums of vector spaces alone
+--- any vector space is an Abelian group --- we have the case of an
+equational theory having two operators obeying associativity,
+commutativity, and distributivity but no other equations.
+
+Unfortunately, unification for such theories is undecidable
+\cite{Siek89}, \cite{Szab82}.
+
+\begin{figure}[t]
+\begin{center}
+\begin{tabular}{|l|r|}
+\hline Type isomorphisms whose & Example given \\ corresponding
+equational theory & on page \\ \hline \hline is not unitary unifying &
+\pageref{isomac} \\ is not finitary unifying & \pageref{isomass}
+\\ has an undecidable unification problem & \pageref{isounde} \\ is
+not finitely axiomatizable &
+\pageref{begincoerpr}--\pageref{endcoerpro}\\ \hline
+\end{tabular}
+\end{center}
+\caption{Some problematic examples of type isomorphisms}
+\end{figure}
+
+\subsection{A Type Coercion Problem}
+\label{scoercprbl}
+\label{begincoerpr}
+
+In this section we want to present an example of a family of types
+which allow type-isomorphisms which correspond to an equational theory
+that is not finitely axiomatizable. In order to set up the example we
+first need a technical result.
+
+\subsubsection{A Technical Result}
+\label{s3}
+
+{\bf Definition 23.}
+{\sl Let $f : \{P,F\}^* \longrightarrow \{P,F\}^*$ be the function, which
+is defined by the following algorithm:
+\begin{itemize}
+\item[] If no $F$ is occurring in the input string, then return the
+ input string as output string.
+
+Otherwise, remove any $F$ except the leftmost occurrence from the
+input string and return the result as output string.
+\end{itemize}}
+
+{\sl Let $\equiv$ be the binary relation on $\{P,F\}^*$ which is defined by}
+$$\forall v,w \in \{P,F\}^*: \; v \equiv w \iff f(v)=f(w).$$
+
+Obviously, the function $f$ can be computed in linear time and the
+relation $\equiv$ is an equivalence relation on $\{P,F\}^*$.
+
+Let $\Sigma$ be the first-order signature consisting of the two unary
+function symbols $F$ and $P$. We will now lift the equivalence
+relation $\equiv$ to a set of equations over $\Sigma$.
+
+{\bf Definition 24.}
+\label{deeqe}
+{\sl Let ${\cal E}$ be the following set of equations:}
+$$\begin{array}{lll} {\cal E} = \{ & S_1(S_2(\cdots S_k(x)\cdots)) =
+ S_{k+1}(S_{k+2}(\cdots S_r(x)\cdots )) \mid \\ & \;\; S_i \in
+ \{F,P\} \:(1 \leq i \leq r) \mbox{ and } S_1 S_2 \cdots S_k \equiv
+ S_{k+1} S_{k+2} \cdots S_r & \} \\
+\end{array}$$
+
+{\bf Theorem 7.}
+\label{thmtr}
+${\cal E}$ is not finitely based, i.\,e.\ there is no finite set of
+axioms for ${\cal E}$.
+
+\begin{proof} Assume towards a contradiction that there
+is such a finite set ${\cal E}_0$. Let ${\cal M}$ be the free model
+of $\aleph_0$ generators over ${\cal E}$ and let ${\cal M}_0$ be the
+free model of one generator over ${\cal E}_0$.
+
+Except for a possible renaming of the variable symbol $x$, ${\cal
+ E}_0$ has to be a subset of ${\cal E}$. Otherwise, ${\cal E}_0$
+would contain an equation of the form
+$$S_1(S_2(\cdots S_k(x)\cdots)) = S_{k+1}(S_{k+2}(\cdots S_r(y)\cdots
+)), $$ or of the form
+$$S_1(S_2(\cdots S_k(x)\cdots)) = S_{k+1}(S_{k+2}(\cdots S_r(x)\cdots
+)), \;\: S_1 S_2 \cdots S_k \not \equiv S_{k+1} S_{k+2} \cdots S_r.$$
+However, none of these equations holds in ${\cal M}$.
+
+Now let $n \in \NN$ be the maximal size of a term in ${\cal E}_0$.
+Then the equation
+$$F(\underbrace{P(P(\cdots (P}_{n}(x) ) \cdots ))) =
+F(P(F(\underbrace{P(P(\cdots (P}_{n-1}(x) ) \cdots )))))$$ holds in
+${\cal M}$, but it does not hold in ${\cal M}_0$. \qed
+\end{proof}
+
+\subsubsection{The Problem}
+\label{s4}
+
+If $R$ is an integral domain, we can form the field of fractions
+$\tf{FF}(R)$. We can also build the ring of univariate polynomials in
+the indeterminate $x$ which we will denote by $\tf{UP}(R,x)$ --- the
+ring of polynomials $R[x]$ in the standard mathematical notation ---
+which is again an integral domain by a Lemma of Gau{\ss}. Thus we can
+also build the field of fractions of $\tf{UP}(R,x)$,
+$\tf{FF}(\tf{UP}(R,x))$ --- the field of rational functions $R(x)$.
+
+Starting from an integral domain $R$ we will always get an integral
+domain and can repeatedly build the field of fractions and the ring of
+polynomials in a ``new'' indeterminate.
+
+
+Thus if a computer algebra system has a fixed integral domain $\tf{R}$
+and names for symbols $\tf{x}_0, \tf{x}_1, \tf{x}_2 \ldots$, it should
+also provide types of the form
+\begin{enumerate}
+\item $\tf{R}$, \label{l11}
+\item $\tf{FF}(\tf{R})$, \label{l12}
+\item $\tf{UP}(\tf{R},\tf{x}_0)$, \label{l13}
+\item $\tf{UP}(\tf{FF}(\tf{R}),\tf{x}_0)$, \label{l14}
+\item $\tf{FF}(\tf{UP}(\tf{R},\tf{x}_0))$, \label{l15}
+\item $\tf{UP}(\tf{UP}(\tf{R},\tf{x}_0),\tf{x}_1)$, \label{l16}
+\item
+ $\tf{UP}(\tf{FF}(\tf{UP}(\tf{R},\tf{x}_0)),\tf{x}_1)$, \label{l17}
+\item
+ $\tf{FF}(\tf{UP}(\tf{UP}(\tf{R},\tf{x}_0),\tf{x}_1))$, \label{l18}
+\item
+  $\tf{FF}(\tf{UP}(\tf{FF}(\tf{UP}(\tf{R},\tf{x}_0)),\tf{x}_1))$, \label{l19}
+\item
+  $\tf{UP}(\tf{UP}(\tf{UP}(\tf{R},\tf{x}_0),\tf{x}_1),\tf{x}_2)$, \label{l110}
+\item[] \rule{0mm}{0mm} \vdots
+\end{enumerate}
+
+It is convenient to use the same symbols for a mathematical object and
+the symbolic expression which denotes the object. In order to clarify
+things we will sometimes use additional $\lsb \cdot \rsb$ for the
+mathematical objects.
+
+There are canonical embeddings from an integral domain into its field
+of fractions and into the ring of polynomials in one indeterminate (an
+element is mapped to the corresponding constant polynomial).
+
+It is common mathematical practice to identify the integral domain
+with its image under these embeddings. Thus the type system should
+also provide a coercion between these types, i.\,e.\ if $t$ is a type
+variable of sort $\cf{integral\_ domains}$ and $x$ is of sort
+$\cf{symbol}$, then
+$$t \subtype \tf{FF}(t)$$ and
+$$t \subtype \tf{UP}(t,x).$$
+
+However, not all of the types built by the type constructors $\tf{FF}$
+and $\tf{UP}$ should be regarded to be different. If the integral
+domain $R$ happens to be a field, then $R$ will be isomorphic to its
+field of fractions. Especially, for any integral domain $R$, $\lsb
+\tf{FF}(R)\rsb$ and $\lsb \tf{FF}(\tf{FF}(R)) \rsb$ are isomorphic.
+
+The fact that also $\lsb \tf{FF}(\tf{FF}(R)) \rsb$ can be embedded in
+$\lsb \tf{FF}(R) \rsb$ can be expressed by
+$$\tf{FF}(\tf{FF}(t)) \subtype \tf{FF}(t),$$ which is one of the
+examples given in \cite[p.~354]{Como91}.
+
+But there are more isomorphisms which govern the relations of this
+family of types.
+
+If we assume that an application of the type constructor $\tf{UP}$
+always uses a ``new'' indeterminate as its second argument, any
+application of the type constructor $\tf{FF}$ except the outermost one
+application is redundant.
+
+This observation will be captured by the following formal treatment.
+In order to avoid the technical difficulty of introducing ``new''
+indeterminates, we will use an unary type constructor $\tf{up}$
+instead of the binary $\tf{UP}$. The intended meaning of $\tf{up}(t)$ is
+$\tf{UP}(t,\tf{x}_n)$, where $\tf{x}_n$ is a new symbol, i.\,e.\ not
+occurring in $t$.
+
+{\bf Definition 25.}
+{\sl Define a function {\sf trans} from $\{F,P\}^*$ into the set of types
+recursively by the following equations. For $w \in \{F,P\}^*$,
+\begin{itemize}
+\item ${\sf trans}(\varepsilon) = \tf{R}$,
+\item ${\sf trans}(Fw)=\tf{FF}({\sf trans}(w))$,
+\item ${\sf trans}(Pw)=\tf{up}({\sf trans}(w))$.
+\end{itemize}}
+
+If we take $\lsb \tf{R} \rsb$ to be the ring of integers, the
+following lemma will be an exercise in elementary
+calculus.\footnote{If we started with the ring of polynomials in
+ infinitely many indeterminates over some domain, then there would be
+ additional isomorphisms.}
+
+{\bf Lemma 7.}
+\label{letrans}
+{\sl Let $\lsb \tf{R} \rsb$ be the ring of integers. For any $v, w \in
+\{F,P\}^*$, the integral domains $\lsb {\sf trans}(v) \rsb$ and $\lsb
+{\sf trans}(w)\rsb$ are isomorphic iff $v \equiv w$.}
+
+{\sl Moreover, $\lsb {\sf trans}(v)\rsb$ can be embedded in $\lsb {\sf
+ trans}(w)\rsb$ and $\lsb {\sf trans}(w)\rsb$ can be embedded in
+$\lsb{\sf trans}(v)\rsb$ iff $\lsb{\sf trans}(v)\rsb$ and $\lsb{\sf
+ trans}(w)\rsb$ are isomorphic.}
+
+{\bf Theorem 8.}
+{\sl Let $\Sigma$ be the signature consisting of the unary function symbols
+$\tf{FF}$ and $\tf{up}$ and the constant $\tf{R}$. Let $\lsb \tf{R}
+\rsb$ be the ring of integers.}
+
+{\sl Then there is no finite set of equations ${\cal E}' $ over $\Sigma$,
+such that for ground terms $t_1$ and $t_2$ the following holds.}
+$$ {\cal E}' \models \{ t_1 = t_2 \} \iff \mbox{$\lsb t_1\rsb$ and
+$\lsb t_2\rsb$ are isomorphic.}$$
+
+\begin{proof}
+If $t_1$ and $t_2$ are ground terms, then there are $v, w \in \{F,
+P\}^*$ such that $t_1={\sf trans}(v)$ and $t_2={\sf trans}(w)$. Now
+we are done by Lemma 7 and Theorem 7. \qed
+\end{proof}
+
+The problem is that the equational theory which describes the coercion
+relations in the example we gave is not finitely based. Since this
+property of an equational theory is {\em equivalence-invariant} in the
+sense of \cite[p.~382]{Grae79}, the use of another signature for
+describing the types does not help.
+\label{endcoerpro}
+
+\subsection{Properties of the Coercion Preorder}
+\label{secpropcoerpreord}
+
+If the type system is coherent, then the category of ground types as
+objects and the coercions as arrows is a preorder. Even if the type
+system is not coherent, a reflexive and transitive relation on the
+ground types (and even on the polymorphic types) is defined by
+``$\subtype$'', i.\,e.\ a preorder.\footnote{Notice the difference
+between {\em a category which is a preorder} and {\em a relation
+which is a preorder}.}
+
+Factoring out the equivalence classes of this reflexive and transitive
+relation we will obtain a partial order on the types.
+
+In general this order on the types will not be a lattice if we
+consider some typical examples occurring in a computer algebra system.
+Take e.\,g.\ the types $\tf{integer}$ and $\tf{boolean}$. There is no
+type which can be coerced to both of these types (unless an additional
+``empty type'' is present in the system).
+
+For many purposes, especially type inference (see
+Sec.~\ref{secaltyincoer}), it would be convenient if this partial
+ordering on the types were a quasi-lattice. In the following we will
+show that in general this will not be the case.
+
+{\bf Example 4.}
+\label{exnolatds}
+Let $\tf{I}$ be the ring of integers and let $\oplus$ denote the
+direct sum of two Abelian groups and let the direct embeddings into
+the first argument and into the second argument of this type
+constructor be present, i.\,e.\ ${\cal D}_{\oplus} = \{ 1, 2 \}$.
+Then we have
+$$\begin{array}{l} \tf{UP}(\tf{I},\tf{x}) \subtype
+ \tf{UP}(\tf{FF}(\tf{I}),\tf{x}), \\ \tf{UP}(\tf{I},\tf{x}) \subtype
+ \tf{UP}(\tf{I},\tf{x}) \oplus \tf{FF}(\tf{I}),\\ \tf{FF}(\tf{I})
+ \subtype \tf{UP}(\tf{FF}(\tf{I}),\tf{x}), \\ \tf{FF}(\tf{I})
+ \subtype \tf{UP}(\tf{I},\tf{x}) \oplus \tf{FF}(\tf{I}),
+\end{array}$$
+and no other coercions can be defined between these types. There is
+also no type $R$ with $R \neq \tf{UP}(\tf{I},\tf{x})$ and $R \neq
+\tf{FF}(\tf{I})$ such that
+$$\begin{array}{l} R \subtype \tf{UP}(\tf{FF}(\tf{I}),\tf{x}), \\ R
+ \subtype \tf{UP}(\tf{I},\tf{x}) \oplus \tf{FF}(\tf{I})
+\end{array}
+$$ (cf.\ Fig.~\ref{fignolatmde}). Thus in this case the partial
+ordering given by $\subtype$ is not a quasi-lattice (see also
+Lemma 3).
+
+\begin{figure}
+\begin{center}
+\unitlength=1.2mm
+\begin{picture}(45.00,50.00)
+\put(13.00,10.00){\makebox(0,0)[cc]{$\tf{UP}(\tf{I},\tf{x})$}}
+\put(37.00,10.00){\makebox(0,0)[cc]{$\tf{FF}(\tf{I})$}}
+\put(13.00,40.00){\makebox(0,0)[cc]{$\tf{UP}(\tf{FF}(\tf{I}),\tf{x})$}}
+\put(37.00,40.00){\makebox(0,0)[cc]{$\tf{UP}(\tf{I},\tf{x}) \oplus
+ \tf{FF}(\tf{I})$}} \put(13.00,13.00){\vector(0,1){24.00}}
+\put(13.00,13.00){\vector(1,1){24.00}}
+\put(37.00,13.00){\vector(0,1){24.00}}
+\put(37.00,13.00){\vector(-1,1){24.00}}
+\put(9.00,25.00){\makebox(0,0)[cc]{$\subtype$}}
+\put(41.00,25.00){\makebox(0,0)[cc]{$\subtype$}}
+\put(22.00,20.00){\makebox(0,0)[cc]{$\subtype$}}
+\put(28.00,20.00){\makebox(0,0)[cc]{$\subtype$}}
+\end{picture}
+\end{center}
+
+\caption{Ad Example 4}
+\label{fignolatmde}
+\end{figure}
+
+Even if we require $|{\cal D}_f| \leq 1$ for all type constructors ---
+recall that this requirement is also necessary in order to ensure a
+coherent type system --- and we have only direct embeddings and
+structural coercions then it is still possible that the partial
+ordering on types induced by ``$\subtype$'' is not a quasi-lattice.
+Consider for instance two type constructors $f:(\sigma)\sigma$ and $g:
+(\sigma)\sigma$ which we assume to be unary for simplicity. If ${\cal
+ D}_f \cap {\cal M}_f \neq \emptyset$ and ${\cal D}_g \cap {\cal M}_g
+\neq \emptyset$ and $t: \sigma$, then
+$$g(t) \subtype f(g(t)) \quad\quad\mbox{and}\quad\quad g(t) \subtype
+g(f(t))$$ and similarly
+$$f(t) \subtype g(f(t)) \quad\quad\mbox{and}\quad\quad f(t) \subtype
+f(g(t))$$ (cf.\ Fig.~\ref{fignolatoth}). Having only direct
+embeddings and structural coercions the conditions imposed in
+Lemma 3 with $a = g(t)$, $b=f(t)$, $c=f(g(t))$ and
+$d=g(f(t))$ are fulfilled.
+
+\begin{figure}
+\begin{center}
+\unitlength=1.2mm
+\begin{picture}(45.00,50.00)
+\put(13.00,10.00){\makebox(0,0)[cc]{$g(t)$}}
+\put(37.00,10.00){\makebox(0,0)[cc]{$f(t)$}}
+\put(13.00,40.00){\makebox(0,0)[cc]{$g(f(t))$}}
+\put(37.00,40.00){\makebox(0,0)[cc]{$f(g(t))$}}
+\put(13.00,13.00){\vector(0,1){24.00}}
+\put(13.00,13.00){\vector(1,1){24.00}}
+\put(37.00,13.00){\vector(0,1){24.00}}
+\put(37.00,13.00){\vector(-1,1){24.00}}
+\put(9.00,25.00){\makebox(0,0)[cc]{$\subtype$}}
+\put(41.00,25.00){\makebox(0,0)[cc]{$\subtype$}}
+\put(22.00,20.00){\makebox(0,0)[cc]{$\subtype$}}
+\put(28.00,20.00){\makebox(0,0)[cc]{$\subtype$}}
+\end{picture}
+\end{center}
+
+\caption{Another counter-example for the coercion order}
+\label{fignolatoth}
+\end{figure}
+
+The type constructors $\tf{FF}$ and $\tf{up}$ have such properties.
+However, we can define
+$$\tf{up}(\tf{FF}(R)) \subtype \tf{FF}(\tf{up}(R))$$ for any integral
+domain $R$ using a coercion which is not a direct embedding nor a
+structural coercion.
+
+So in this case some ``ad hoc knowledge'' can be used to avoid that
+the partial ordering induced by $\subtype$ is not a quasi-lattice.
+
+In general, it does not seem to be justified to assume that the
+partial ordering induced by $\subtype$ is a quasi-lattice.
+
+\subsection{Combining Type Classes and Coercions}
+\label{seccomtcco}
+
+Let
+$${\rm op} : \overbrace{v_\sigma \times \cdots \times v_\sigma}^{n}
+\longrightarrow v_\sigma$$ be an $n$-ary operator defined on a type
+class $\sigma$ and let $A \subtype B$ be types belonging to $\sigma$
+and let
+$$ \phi: A \longrightarrow B$$ be the coercion function. Moreover,
+let ${\rm op}_A$ and ${\rm op}_B$ be the instances of ${\rm op}$ in
+$A$ resp.\ $B$.
+
+For $a_1, \ldots, a_n \in A$ the expression
+$${\rm op}(a _1, \ldots, a_n)$$ might denote different objects in $B$,
+namely
+$${\rm op}_B(\phi(a_1), \ldots, \phi(a_n))$$ or
+$$\phi({\rm op}_A(a_1, \ldots, a_n)).$$
+
+The requirement of a unique meaning of
+$${\rm op}(a _1, \ldots, a_n)$$ just means that $\phi$ has to be a
+{\em homomorphism} for $\sigma$ with respect to ${\rm op}$.
+
+The typing of ${\rm op}$ in the example above is only one of several
+possibilities. In general if $\sigma$ is a type class having
+$p_{\tau_1}, \ldots, p_{\tau_k}$ as parameters ---
+i.\,e.\ $p_{\tau_i}$ is a type variable of sort $\tau_i$ --- then a
+$n$-ary first-order operation ${\rm op}$ defined in $\sigma$ can have
+the following types.\footnote{For simplicity, we will exclude in the
+ following discussion arbitrary polymorphic types different from type
+ variables. Especially, we will not regard higher-order functions,
+ which do not play a central role in computer algebra although they
+ are useful, cf.\ Sec.~\ref{posappcom}. For the other relevant cases
+ of polymorphic types the following can be generalized easily.}
+$${\rm op}: \xi_1 \times \cdots \times \xi_n \longrightarrow
+\xi_{n+1},$$ where $\xi_i$, $1 \leq i \leq n+1$, is either $v_\sigma$,
+or $p_{\tau_l}$, $l \leq k$, or a ground type $t_m$.
+
+As on page~\pageref{defcatsigmai} let $\cat{C}_{\sigma}$ be the
+category of ground types of sort $\sigma$ as objects and the coercions
+as arrows. For a ground type $t$ let $\cat{C}_t$ be the subcategory
+which has $t$ as single object and has thus the identity on $t$ as
+single arrow.\footnote{If the type system is not coherent this
+ subcategory might have more than one arrow.} Now let
+$$ \cat{C}_i=\left\{
+\begin{array}{ll}
+\cat{C}_{\sigma}, & \mbox{if }\xi_i = v_\sigma,\\ \cat{C}_{\tau_l}, &
+\mbox{if }\xi_i = p_{\tau_l},\\ \cat{C}_{t_m}, & \mbox{if }\xi_i = t_m
+\mbox{ for a ground type $t_m$}.
+\end{array}
+\right.
+$$
+
+Let $\rtypeasop$ be a functor from $\cat{C}_1 \times \cdots \times
+\cat{C}_n$ into $\cat{C}_{n+1}$. If $(\zeta_1, \ldots, \zeta_n)$ is
+an object of $\cat{C}_1 \times \cdots \times \cat{C}_n$, i.\,e.\
+$$ \zeta_i=\left\{
+\begin{array}{ll}
+A_{\sigma}, & \mbox{if }\xi_i = v_\sigma \mbox{ and $A_\sigma$ is a
+ ground type belonging to $\sigma$},\\ A_{\tau_l}, & \mbox{if }\xi_i
+= p_{\tau_l}\mbox{ and $A_{\tau_l}$ is a ground type belonging to
+ $\tau_l$},\\ t_m, & \mbox{if }\xi_i = t_m,
+\end{array}
+\right.
+$$ then $\rtypeasop(\zeta_1, \ldots, \zeta_n)$ is an object of
+$\cat{C}_{n+1}$, i.\,e.\ a ground type belonging to $\sigma$
+resp.\ $\tau_{l'}$, or is a ground type $t_{m'}$ depending on the
+value of $\xi_{n+1}$.
+
+Informally $\rtypeasop$ can be used to specify the type of the range
+of an instantiation of ${\rm op}$ if instantiations of $\sigma$ and
+the parameters of $\sigma$ are given. We need a functor $\rtypeasop$
+because of the following reason. Given two instantiations of the type
+class which can be described by $(\zeta_1, \ldots, \zeta_n)$ and
+$(\zeta'_1, \ldots, \zeta'_n)$ such that
+$$\zeta_i \subtype \zeta'_i \quad \forall i \leq n$$ it is necessary
+that
+$$\rtypeasop(\zeta_1, \ldots, \zeta_n) \subtype \rtypeasop(\zeta'_1,
+\ldots, \zeta'_n).$$ Otherwise, if $a_i$ is an object of type
+$\zeta_i$, $1 \leq i \leq n$, the expression
+$${\rm op}(a_1, \ldots, a_n)$$ has the types $\rtypeasop(\zeta_1,
+\ldots, \zeta_n)$ and $\rtypeasop(\zeta'_1, \ldots, \zeta'_n)$ for
+which a coercion has to be defined in order to give the expression a
+unique meaning.
+
+If $\sigma$ is a non-parameterized type class {\em any} mapping
+assigning an appropriate type to a tuple $(\zeta_1, \ldots, \zeta_n)$
+can be extended to a functor. So the requirement that $\rtypeasop$ is
+a functor is only a restriction for parameterized type classes.
+
+Since in a coherent type system there are unique coercions between
+types, we will omit the names of the coercions in the following and we
+will write
+$$\rtypeasop(\zeta_1 \subtype \zeta'_1, \ldots, \zeta_n \subtype
+\zeta'_n)$$ for the image of the single arrow between the objects
+ $$(\zeta_1, \ldots, \zeta_n) \mbox{ and } (\zeta'_1, \ldots,
+\zeta'_n)$$ in the category
+$$\cat{C}_1 \times \cdots \times \cat{C}_n$$ under the functor
+$\rtypeasop$. Thus $\rtypeasop(\zeta_1 \subtype \zeta'_1, \ldots,
+\zeta_n \subtype \zeta'_n)$ is an arrow in $\cat{C}_{n+1}$.
+
+Let $\catofsets$ be the category of all sets as objects and functions
+as arrows.\footnote{Notice that the category theoretic object
+ $\catofsets$ is quite different from the {\sf Axiom} category {\tt
+ SetCategory}.}
+
+By the assumption of set theoretic ground types and coercion functions
+we can assign to any object of $\cat{C}_\sigma$ an object of
+$\catofsets$ and to any arrow in $\cat{C}_\sigma$ an arrow of
+$\catofsets$ in a functorial way. We will write
+$\typesetinterpr{\cat{C}_\sigma}$ for the functor defined by this
+mapping.
+
+We will use the notation $\zeta_i \subtype \zeta'_i$ to denote the
+single arrow between $\zeta_i$ and $\zeta'_i$ in $\cat{C}_i$. Thus
+$$ \typesetinterpr{\cat{C}_1} \times \cdots \times
+\typesetinterpr{\cat{C}_n}(\zeta_1 \subtype \zeta'_1, \ldots, \zeta_n
+\subtype \zeta'_n)
+$$ is an arrow in the category
+$$\underbrace{\catofsets \times \cdots \times \catofsets}_{n}.$$ Since
+$n$-tuples of sets are sets there is a functor from $\catofsets ^n$
+into $\catofsets$ which we will denote by $\flatsetn$.
+
+If $(\zeta_1, \ldots, \zeta_n)$ is an object in $\cat{C}_1 \times
+\cdots \times \cat{C}_n$ we are now ready to formalize a requirement
+on the instantiation of ${\rm op}$ given by $(\zeta_1, \ldots,
+\zeta_n)$. We will not impose this condition directly on ${\rm
+ op}_{(\zeta_1, \ldots, \zeta_n)}$. It will be convenient to regard
+the set-theoretic interpretation
+$$\typesetinterpr{\cat{C}_1} \times \cdots \times
+\typesetinterpr{\cat{C}_n}(\zeta_1, \ldots, \zeta_n)$$ of $(\zeta_1,
+\ldots, \zeta_n)$ instead of this $n$-tuple of types itself. Then the
+set-theoretic interpretation of ${\rm op}_{(\zeta_1, \ldots,
+ \zeta_n)}$ induces a function between
+$$\flatsetn(\typesetinterpr{\cat{C}_1} \times \cdots \times
+\typesetinterpr{\cat{C}_n}(\zeta_1, \ldots, \zeta_n))$$ and
+$$\typesetinterpr{\cat{C}_{n+1}}(\rtypeasop(\zeta_1, \ldots,
+\zeta_n)),$$ which we will denote by $\opinterprset{{\rm op}}(\zeta_1,
+\ldots, \zeta_n)$.
+
+Given $(\zeta_1, \ldots, \zeta_n)$ and $(\zeta'_1, \ldots, \zeta'_n)$
+such that
+$$\zeta_i \subtype \zeta'_i \quad \forall i \leq n$$ we just need that
+the following diagram is commutative.
+
+\begin{center}
+\resetparms \setsqparms[1`1`1`1;2000`700]
+\square[\flatsetn(\typesetinterpr{\cat{C}_1} \times \cdots \times
+ \typesetinterpr{\cat{C}_n}(\zeta_1, \ldots, \zeta_n))`
+ \typesetinterpr{\cat{C}_{n+1}}(\rtypeasop(\zeta_1, \ldots,
+ \zeta_n))` \flatsetn(\typesetinterpr{\cat{C}_1} \times \cdots \times
+ \typesetinterpr{\cat{C}_n}(\zeta'_1, \ldots, \zeta'_n))`
+ \typesetinterpr{\cat{C}_{n+1}}(\rtypeasop(\zeta'_1, \ldots,
+ \zeta'_n)); \opinterprset{{\rm op}}(\zeta_1, \ldots, \zeta_n)` {\rm
+ L}` {\rm R}` \opinterprset{{\rm op}}(\zeta'_1, \ldots, \zeta'_n)]
+\end{center}
+
+In the diagram above we have set
+$${\rm L} =\flatsetn( \typesetinterpr{\cat{C}_1} \times \cdots \times
+\typesetinterpr{\cat{C}_n}(\zeta_1 \subtype \zeta'_1, \ldots, \zeta_n
+\subtype \zeta'_n))$$ and
+$${\rm R} = \typesetinterpr{\cat{C}_{n+1}}(\rtypeasop (\zeta_1
+\subtype \zeta'_1, \ldots, \zeta_n \subtype \zeta'_n)).
+$$
+
+This requirement on $\opinterprset{{\rm op}}$ can be read that
+$\opinterprset{{\rm op}}$ is a {\em natural transformation} between
+the functor
+$$\flatsetn \circ (\typesetinterpr{\cat{C}_1} \times \cdots \times
+\typesetinterpr{\cat{C}_n})$$ and the functor
+$$\typesetinterpr{\cat{C}_{n+1}} \circ \rtypeasop .$$
+
+Thus for an $n$-ary first-order operator ${\rm op}$ the requirements
+that
+\begin{enumerate}
+\item the assignments of a range type for an operation given
+ instantiations of a type class and its parameters has to be
+ ``functorial'' and
+\item the instantiation of the operator has to correspond to a natural
+ transformation between functors giving the set-theoretic
+ interpretations of the ground types and the coercions between them
+\end{enumerate}
+will guarantee that type classes and coercions interact nicely,
+i.\,e.\ give expressions involving ${\rm op}$ a unique meaning.
+
+
+A brief inspection of the examples of parameterized type classes
+occurring in {\sf Axiom} by the author has suggested that there is no
+example violating the first requirement which will always hold in
+non-parameterized type classes. Nevertheless, a formal requirement
+for a computer algebra language seems to be useful to ensure that no
+such violation will occur in future extensions.
+
+The second requirement is formulated as one on the possible
+instantiations of operators. However, it can also be read that given
+the instantiations only certain coercions between base types are
+allowed, namely only coercions for which the interpretation is a
+natural transformation. We will show below that using this view we
+can conclude that only ``injective'' coercion functions are allowed
+between most types.\footnote{In the following we will precisely state
+ what we mean by ``injective'' and ``most types.''}
+
+
+\begin{remark}
+Our conditions imposed on the combination of type classes and
+coercions are an adaptation of the work of Reynolds \cite{Reyn80}
+on {\em category-sorted algebras}. The difference is that Reynolds
+allows each operator to be generic, i.\,e.\ that it may be
+instantiated with any type in any position. We allow type-class
+polymorphism at some position and do not allow polymorphism at all in
+other positions which seems to be the natural way to describe many
+important examples.
+\end{remark}
+
+\subsubsection{Injective Coercions}
+
+An important type class is the class of types on which a test for
+equality of objects can be performed in the system.\footnote{It is
+called {\tt Eq} in {\sf Haskell} and {\tt SetCategory} in
+{\sf Axiom}.} In this type class the operator symbol
+$$= \: : t_{\cf{Eq}} \times t_{\cf{Eq}} \longrightarrow \tf{Boolean}$$
+is used to denote the system test for equality. In order to
+distinguish between the ``system equality'' and ``true equality'' we
+will use
+$${\tt isequal} : t_{\cf{Eq}} \times t_{\cf{Eq}} \longrightarrow
+\tf{Boolean}$$ for the system equality in the following.
+
+Then the boolean values of
+$${\tt isequal}(a_1, a_2)$$ and
+$${\tt isequal}(\phi(a_1),\phi(a_2))$$ have to be the same.
+Especially, if the latter evaluates to {\tt true} then the former also
+has to evaluate to {\tt true}. In analogy to the definition of
+injective this means that $\phi$ has to be an injective function
+``modulo system equality'' (usually, the definition of injective
+involves true equality).
+
+Thus coercions between types belonging to the ``equality type class''
+have to be ``injective.''
+
+The system equality for a type might very well differ from the
+equality defined on a certain data type representing it. So very
+often the rational numbers are just represented as pairs of integers.
+Then different pairs of integers can represent the same rational
+number, thus the system test for equality of rational numbers is
+different from the equality on pairs of integers.
+
+Of course, a non-injective coercion function would not violate our
+requirements, if $A$ and $B$ do not use the same operator symbol as a
+test for equality. Thus defining two different type classes
+$\cf{Eq1}$ and $\cf{Eq2}$ with operators ${\tt isequal1}$ resp.\ ${\tt
+ isequal2}$ as tests for equality and having $A$ of type class
+$\cf{Eq1}$ and $B$ of type class $\cf{Eq2}$ would allow to define a
+non-injective function to be a coercion between $A$ and $B$. Defining
+such different type classes is also a clear indication for the user
+that there are problems. Exposing a problem seems to be preferable
+to hiding it and hoping that it will not occur. Although
+usually for two elements $a_1$ and $a_2$ of type $A$ the test for
+equality in $A$ will be used and not the one in $B$ it might happen
+that one of the elements is coerced to $B$. Probably, this will not
+happen very frequently which makes the situation even more dangerous,
+since the system will wrongly say that two elements are equal only in
+situations which are rather complicated so that the behavior of the
+system might not be clear for the user.\footnote{For instance, the
+ situation described above arises when coercions between (arbitrary
+ precision) integers and floating point numbers are defined and the
+ same symbol is used as a test for equality. Then two integers $a$
+ and $b$ which are not equal might be equal if they are coerced to
+ floating point numbers. Such a coercion is used in many systems if
+ an expression like ``$a+0.0$'' occurs and can thus happen in
+ situations which are quite surprising for the user.}
+
+So the requirement of ``injective'' coercions seems to be absolutely
+necessary for a computer algebra system although it is not required by
+a system like {\sf Axiom}!\footnote{Since it is an undecidable problem
+ to check whether a given recursive function is injective --- which
+ can be easily proved by applying Rice's Theorem --- it is not
+ possible to enforce by a compiler that coercions are injective if
+ functions defined by arbitrary code can be declared to be coercions.
+ Nevertheless, it seems to be useful to state this requirement as a
+ guideline for a programmer.}
+
+\subsection{Type Inference}
+\label{sectypinfcoer}
+
+In Sec.~\ref{sectytycl} we have seen that the type inference problem
+for a language having type classes is decidable even if we have a
+language with higher-order functions and one allowing parametric
+polymorphism. Moreover, there is a finite set of types for any object
+of the language such that any type of the object is a substitution
+instance of one of those types.
+
+The type inference problem for a language with coercions is much more
+complicated. So there are objects which have infinitely many types
+which are not substitution instances of finitely many (polymorphic)
+types.\footnote{Using the results of Sec.~\ref{secaltyincoer} it will
+ be possible to assign finitely many types to an object in the
+ subsystem described in that section which have ``minimal
+ properties'' among all types of the object.} Consider a type
+$\tf{R}$ belonging to a type class $\cf{commutative\_ring}$ and let
+$r$ be an object of type $\tf{R}$. Given coercions
+$$v_{\cf{commutative\_ring}} \subtype
+\tf{up}(v_{\cf{commutative\_ring}})$$ then $r$ also has the types
+$$\tf{up}(\tf{R}), \tf{up}(\tf{up}(\tf{R})), \ldots$$
+
+In \cite{Mitc91}, \cite{Fuhx89}, \cite{Fuhx90} type systems for
+functional languages allowing coercions between base types and
+structural coercions are given and type inference algorithms for them.
+These systems do not allow type class polymorphism nor parametric
+polymorphism. In \cite{Brea89}, \cite{Brea91} a system having
+coercions and parametric polymorphism is given; however, no type
+inference for the system is provided.
+
+In \cite{That91} a type inference system for the case of type
+isomorphisms induced by coercions is given which allows parametric
+polymorphism. However, as is argued in \cite{That91} if the
+equational theory corresponding to the type isomorphisms is not
+unitary unifying then the semantics of an expression involving {\tt
+ let} may be ambiguous. Moreover, the type inference problem is
+reduced to a unification problem over the equational theory
+corresponding to the type isomorphisms. So in the case of an
+undecidable equational unification problem
+(cf.\ Example 3) only a semi-decision method is available
+for type inference.
+
+Type inference algorithms for a system allowing parametric
+polymorphism and records resp.\ variants are given in \cite{Wand87},
+\cite{Wand88}, \cite{Wand89}, \cite{Wand91}, \cite{Stan88},
+\cite{Leis87}, \cite{Remy89}. Since variants can be used to model
+classes of isomorphic types some of these results can be applied if we
+model classes of isomorphic types as variants.
+
+Kaes \cite{Kaes92} gives a system allowing type-class polymorphism
+(also parametric type classes can be described) which can handle
+coercions between base types and structural coercions according to our
+definition.\footnote{In the systems in \cite{Mitc91}, \cite{Fuhx90},
+\cite{Fuhx89} all type constructors have to be monotonic or
+antimonotonic in all arguments.} However, direct embeddings are not
+allowed.
+
+In \cite{Como91} a type inference system and a semi-decision procedure
+for it are described. However, in that system some assumptions on the
+properties on coercions are imposed which are not justified for many
+examples occurring in computer algebra.\footnote{The problematic
+assumptions are that all type constructors have to be monotonic in
+all arguments and that any polymorphic type can be coerced to its
+substitution instances.} In \cite{Como91} a proof is given that the
+type inference problem for the described system becomes undecidable if
+no restrictions on the coercions are imposed.
+
+Since there are infinitely many ground types in a system usually
+infinitely many coercions will be necessary. However, with the
+exception of the example stated in Sec.~\ref{scoercprbl} all examples
+of coercions we have given --- such as the direct embeddings and the
+structural coercions --- can be described by a finite set of Horn
+clauses which will usually have variables. The formalism of Horn
+clauses is strong enough to capture type classes and even parametric
+type classes and also polymorphic types can be described. Then the
+typability of an object can be stated as the question whether a
+certain clause is the logical consequence of the given set of Horn
+clauses. Thus using a complete Horn clause theorem
+prover\footnote{Notice that {\sf PROLOG} is not one because of the
+depth-first search strategy it uses.} we have a semi-decision
+procedure for type inference. The size of the search space seems to
+be a problem for the practical use of this method, but not the fact
+that it is only a semi-decision procedure. If an expression cannot be
+typed using certain resources --- i.\,e.\ a typing of the expression
+involves too many coercions if it is typeable at all --- it does not
+seem to be a practical limitation if a system rejects the expression
+as possibly untypeable and asks the user to provide more typing
+information if the user thinks that the expression is typeable.
+
+It is not clear which classes of coercions in connection with which
+other typing constructs are allowed such that the type inference
+problem is decidable. Coercions between polymorphic types are
+certainly a problem. In the following we will shortly discuss to what
+extent some restrictions are justified for a computer algebra system.
+
+If type inference has to be performed for user defined functions, then
+polymorphic types arise naturally (cf.\ Sec.~\ref{secaxhasex}). Since
+the possibility to type user defined functions is useful for a
+computer algebra system but does not play the same central role as for
+a functional programming language it might be reasonable to exclude
+them from type inference if coercions are present in order to
+facilitate the problem.
+
+But there are also other objects than functions that can be
+polymorphic. Especially there are naturally occurring examples of
+{\em polymorphic constants}.
+
+In {\sf Haskell} integer constants are polymorphic constants. If $n$
+is a constant denoting an integer then it also denotes the
+corresponding objects of the types in the type class {\tt Num}.
+Having a language allowing coercions the use of polymorphic constants
+can be avoided for the examples used in {\sf Haskell}, because
+coercions can be defined between the types belonging to {\tt Num} in
+{\sf Haskell}.\footnote{In {\sf Haskell} only explicit conversions but
+no implicit coercions are allowed.}
+
+In a computer algebra system there are more types present which have
+objects usually denoted by integer constants. A nice example showing
+the use of polymorphic constants in mathematical notation is given by
+Rector \cite[p.~304]{Rect89}:
+\begin{quote}
+Consider
+$$\frac{(x+y)^{1+n}+1}{1+nx}$$
+where the user wants to work with rational functions
+over a finite field of $p$-elements.
+This formula presents the problem of polymorphic constants.
+To a mathematician, the types of each subexpression are immediately clear:
+$n$ is an integer variable which must be reduced modulo $p$
+in the denominator of the expression, $x$ and $y$ are finite field
+variables, $1$ appearing in the exponent is an integer and
+the other $1$'s are the multiplicative identity of the finite field.
+\end{quote}
+Since there are no embeddings from $\ZZ$ into
+$\ZZ_m$ nor from $\ZZ_m$ into $\ZZ$ ---
+for $n \neq m$ there is not even one from
+the ring $\ZZ_m$ into the ring $\ZZ_n$\footnote{If $n =km$
+then there is an embedding of the {\em Abelian group}
+$\langle \ZZ_m,+ \rangle$ into the Abelian group
+$\langle \ZZ_n,+ \rangle$, namely the one given by the mapping
+$i \mapsto ki$. Notice that a declaration of this embedding to
+be a coercion between the corresponding types
+and to have the elements of $\ZZ$ as polymorphic constants
+(in their usual interpretation) in $\langle \ZZ_m,+ \rangle$
+and in $\langle \ZZ_n,+ \rangle$ would contradict
+the requirements stated in Sec.~\ref{seccomtcco}.}
+the use of polymorphic constants cannot
+be avoided by introducing coercions.
+
+\subsubsection{Algorithms for Type Inference}
+\label{secaltyincoer}
+
+In the following section we will restrict the types
+to the ones which can be expressed as terms
+of a finite order-sorted signature.
+As we have seen in Sec.~\ref{secproossty}
+we can also assume that the signature is regular.
+
+Let ${\rm op}$ be an $n$-ary operation,
+$${\rm op}: \xi_1 \times \cdots \times \xi_n \longrightarrow
+ \xi_{n+1},$$
+where $\xi_i$, $1 \leq i \leq n+1$,
+is either a type variable $v_{\tau_l}$, $l \leq k$,
+or a ground type $\overline{t}_i$.
+Given objects
+$o_1, \ldots , o_n$ having types
+$t_1, \ldots, t_n$ respectively,
+the expression
+$${\rm op}(o_1, \ldots, o_n)$$
+will be well typed having type $\xi_{n+1}$
+iff the following conditions are satisfied.
+\begin{enumerate}
+\item If $\xi_i=\overline{t}_i$ for some ground type
+$\overline{t}_i$ then $t_i \subtype \overline{t}_i$.
+\item If $\xi_i = \xi_j = v_{\tau_l}$ for some
+$i \neq j$ then there is a type $t : \tau_l$
+such that
+$t_i \subtype t$ and $t_j \subtype t$.
+\item If $\xi_i = v_{\tau_l}$ for some $l \leq k$ then there is a type
+$t:\tau_l$ such that $t_i \subtype t$.
+\end{enumerate}
+
+Notice that if we require that all
+objects have ground types then algorithms solving the problems
+imposed by the above conditions can be used
+to solve the type inference problem using
+a bottom-up process.\footnote{Similar ideas can be found
+in \cite[Sec.~4]{Como91} and in \cite{Rect89}.}
+
+If we do not restrict the possible coercions
+then determining whether for given types
+$t_1$ and $t_2$ there is a type $t$ such that
+$t_1 \subtype t$ and $t_2 \subtype t$ might be an
+undecidable problem (cf.\ \cite{Como91}).
+
+In the following we will restrict the possible coercions
+to coercions between base types,\footnote{By the assumption of
+a finite signature there are only finitely many base types and we will
+assume that the finitely many
+coercions between base types are effectively given.}
+ direct embeddings and structural coercions.
+In Sec.~\ref{seccoh} we have defined
+the coercions only between ground types, because we
+have given semantic considerations on coercions
+and it is not clear how to define a semantics
+for arbitrary polymorphic types.
+The algorithmic problems we are dealing with
+in this section can be seen as algorithmic problems
+on certain terms of an order-sorted signature where
+an additional relation ``$\subtype$'' is given.
+It will be convenient to define
+$\subtype$ also for polymorphic types, i.\,e.\ non-ground terms.
+It is clear how the definitions given in
+Sec.~\ref{seccoh} for direct embeddings and
+structural coercions can be extended to
+polymorphic types.
+
+We will assume that for any type constructor $f$
+the set of direct embedding positions ${\cal D}_f$
+and the sets ${\cal A}_f$ and ${\cal M}_f$
+are well defined, i.\,e.\ independent
+of the arguments of $f$.
+Moreover, we will assume that
+for any types $t_1 \subtype t_2$ and any (sort-correct)
+substitution $\theta$ we also have
+$\theta(t_1) \subtype \theta(t_2)$.
+These assumptions
+are satisfied by all examples we gave and
+are natural for the formalism of describing
+types we use.
+
+The advantage of extending
+the notions of direct embeddings and
+structural coercions to polymorphic types is
+that there are {\em finitely} many (polymorphic) types
+$$t_1^1 \subtype t_1^2 , \ldots, t_r^1 \subtype t_r^2$$
+such that for any types
+$t_1 \subtype t_2$ there is
+a (sort-correct) substitution $\theta$ and
+an $1 \leq i \leq r$ such that
+$$t_1 = \theta(t_i^1) \quad\mbox{and}\quad t_2 =\theta(t_i^2).$$
+
+{\bf Proposition 2.}
+{\sl Assume that the types are terms of a finite, regular
+order-sorted signature and that there are only
+coercions between base types, direct embeddings and
+structural coercions. Then for any type $t$, the set}
+$${\cal S}_t = \{ \sigma \mid \exists t'\, . \, t' : \sigma \mbox{ and }
+t \subtype t' \}$$
+{\sl is effectively computable.}
+
+\begin{proof}
+We claim that the set ${\cal S}_t$
+will be computed by ${\sf CSGT}(t)$
+(see Fig.~\ref{figalCSGT}).
+
+All computations which are used in
+${\sf CSGT}$ and ${\sf CSBT}$
+can be performed effectively.
+Since the signature is finite
+there are always only finitely many possibilities
+which have to be checked in the existential clauses of
+the algorithms and so
+algorithm ${\sf CSBT}$ will terminate and so will
+${\sf CSGT}$.
+Algorithm ${\sf CSGT}$
+is correct (i.\,e.\ ${\sf CSGT}(t) \subseteq {\cal S}_t$),
+because only types and
+the sort of types $t$ can be coerced to are computed.
+Its completeness
+(i.\,e.\ ${\sf CSGT}(t) \supseteq {\cal S}_t$)
+ follows from the fact that structural
+coercions cannot add new sorts to ${\cal S}_t$.
+\qed
+
+\newsavebox{\algcsbt}
+\newsavebox{\algcsgt}
+\newsavebox{\algcsgtandcsbt}
+\sbox{\algcsbt}{\begin{minipage}[l]{0.94\textwidth}
+\begin{center}
+${\cal S} \leftarrow {\sf CSBT}(t)$.
+\end{center}
+[Sorts of types a base type $t$ is coercible to.
+ ${\cal S}$ is the set
+of sorts of types to which $t$ can be coerced.
+Assumes that the signature is finite, only direct embeddings
+and structural coercions are present.]
+\begin{deflist}{(1)}
+\item[(1)] [Initialize.] ${\cal T}
+ \leftarrow \{ t' \mid t \subtype t' \mbox{ and }
+ t' \mbox{ is a base type}
+ \}$; \\
+${\cal S} \leftarrow \{ \sigma' \mid t : \sigma' \}$;
+${\cal S}' \leftarrow {\cal S}$; ${\cal T}' \leftarrow {\cal T}$.
+\item[(2)] [Compute Direct Embeddings.]
+{\bf for} $\overline{t} \in {\cal T}$
+ {\bf do}
+ $\aldesbegbr$
+{\bf if} there are
+ $\overline{\sigma}$,
+ $f :(\sigma_1 \cdots \sigma_n) \sigma'$,
+ $i \in \{ 1, \ldots, n \}$ such that
+ $\overline{t} : \overline{\sigma}$
+ and $\sigma_i=\overline{\sigma}$
+ and $i \in {\cal D}_f$
+ and $\sigma' \notin {\cal S}$
+{\bf then}
+ $\aldesbegbr$
+ ${\cal S}' \leftarrow {\cal S}' \cup \{ \sigma' \}$;
+ ${\cal T}' \leftarrow {\cal T}' \cup
+ \{ f(v_{\sigma_1}, \ldots, v_{\sigma_n}) \}$
+ $\aldesendbr$ $\aldesendbr$.
+\item[(3)] [Iterate if something is added.] {\bf if}
+ ${\cal S'} \neq {\cal S}$
+ {\bf then} $\aldesbegbr$ \\
+ ${\cal S} \leftarrow {\cal S}'$;
+ ${\cal T} \leftarrow {\cal T}' $;
+{\bf goto~(2)} $\aldesendbr$.
+\end{deflist}
+\end{minipage}
+}
+\sbox{\algcsgt}{\begin{minipage}[l]{0.94\textwidth}
+\begin{center}
+${\cal S} \leftarrow {\sf CSGT}(t)$.
+\end{center}
+[Sorts of types a type $t$ is coercible to.
+ ${\cal S}$ is the set
+of sorts of types to which $t$ can be coerced.
+Assumes that the signature is finite, only direct embeddings
+and structural coercions are present.]
+\begin{deflist}{(1)}
+\item[(1)] [$t$ base type.] {\bf if} $\com(t)=0$ {\bf then}
+ $\aldesbegbr\,
+ {\cal S} \leftarrow {\sf CSBT}(t)$;
+ {\bf return}$\aldesendbr$.
+\item[(2)] [Recurse.] Let $t=g(t_1, \ldots, t_m)$; \\
+ {\bf for} $i = 1, \ldots, m$
+ {\bf do} ${\cal S}_i \leftarrow {\sf CSGT}(t_i)$; \\
+ {\bf for} $(\sigma_1, \ldots, \sigma_m) \in
+ {\cal S}_1 \times \cdots \times {\cal S}_m$
+ {\bf do} $\aldesbegbr$ \\
+ {\bf if}
+ there is
+ $g: (\sigma_1 \cdots \sigma_m) \overline{\sigma}$
+ such that
+ $\overline{\sigma} \notin {\cal S}$
+ {\bf then}
+ $\aldesbegbr$ \\
+ ${\cal T} \leftarrow
+ {\cal T} \cup \{ g(v_{\sigma_1}, \ldots, v_{\sigma_m}) \}$;
+ ${\cal S} \leftarrow {\cal S} \cup \{ \overline{\sigma} \} $;
+ ${\cal S}' \leftarrow {\cal S}$;
+ ${\cal T}' \leftarrow {\cal T}$
+ $\aldesendbr$ $\aldesendbr$.
+\item[(3)] [Compute Direct Embeddings.]
+{\bf for} $\overline{t} \in {\cal T}$
+ {\bf do}
+ $\aldesbegbr$
+{\bf if} there are
+ $\overline{\sigma}$,
+ $f :(\sigma_1 \cdots \sigma_n) \sigma'$,
+ $i \in \{ 1, \ldots, n \}$ such that
+ $\overline{t} : \overline{\sigma}$
+ and $\sigma_i=\overline{\sigma}$
+ and $i \in {\cal D}_f$
+ and $\sigma' \notin {\cal S}$
+{\bf then}
+ $\aldesbegbr$
+ ${\cal S}' \leftarrow {\cal S}' \cup \{ \sigma' \}$;
+ ${\cal T}' \leftarrow {\cal T}' \cup
+ \{ f(v_{\sigma_1}, \ldots, v_{\sigma_n}) \}$
+ $\aldesendbr$ $\aldesendbr$.
+\item[(4)] [Iterate if something is added.] {\bf if}
+ ${\cal S'} \neq {\cal S}$
+ {\bf then} $\aldesbegbr$ \\
+ ${\cal S} \leftarrow {\cal S}'$;
+ ${\cal T} \leftarrow {\cal T}' $;
+{\bf goto~(3)} $\aldesendbr$.
+\end{deflist}
+\end{minipage}
+}
+\sbox{\algcsgtandcsbt}{
+\begin{minipage}[l]{0.96\textwidth}
+\begin{center}
+\usebox{\algcsgt}
+\end{center}
+\vspace{2\bigskipamount}
+where
+\vspace{2\bigskipamount}
+\begin{center}
+\usebox{\algcsbt}
+\end{center}
+\end{minipage}
+}
+\begin{figure}[tbhp]
+\hfil\fbox{\usebox{\algcsgtandcsbt}}
+\caption{Algorithms computing sorts of
+ types a given type can be coerced to}
+\label{figalCSGT}
+\end{figure}
+
+
+\end{proof}
+
+In the following we will rule out
+antimonotonic structural coercions,
+i.\,e.\ we will require that
+${\cal A}_f = \emptyset$ for all
+type constructors $f$.
+
+Notice that the restriction
+${\cal A}_f = \emptyset$
+does not exclude type constructors like
+$\tf{FS}$ from the framework.
+Only the automatic insertion of a coercion
+giving rise to the antimonotony is excluded.
+For instance, instead of having $\tf{FS}$ as
+a type constructor which is
+antimonotonic in its first argument and monotonic in
+its second, it is one which is only monotonic in its
+second argument. Such a restriction does not seem
+to cause a loss of too much expressiveness.
+This is an important difference to the system
+in \cite{Como91}, in which all type constructors
+have to be monotonic in all arguments.
+Type constructors which are antimonotonic in some
+argument have to be excluded from that system in general,
+because it is not possible that a type constructor being
+antimonotonic in some argument can be made monotonic
+in that argument without changing the
+intended meaning of the type constructor.
+Thus our framework is more general in this respect
+than the one in \cite{Como91}.
+However, direct embeddings are a special form
+of the ``rewrite relations'' for coercion considered
+in that paper.
+So the following can be seen as a solution
+for one of the open problems stated in \cite{Como91},
+namely finding restrictions on the system of coercions
+which will yield a decidable type inference problem.
+
+{\bf Definition 26.}
+{\sl If for two types $t_1$ and $t_2$ there is a type $t$
+such that $t_1 \subtype t$ and $t_2 \subtype t$
+then $t$ is called a {\em common upper bound}
+of $t_1$ and $t_2$.}
+
+{\sl A {\em minimal upper bound} $\mub(t_1,t_2)$ of two types
+$t_1$ and $t_2$ is a type $t$ satisfying the following
+conditions.
+\begin{enumerate}
+\item The type $t$ is a common upper bound of $t_1$ and $t_2$.
+\item If $t'$ is a type which is a common
+upper bound of $t_1$ and $t_2$
+ such that $t' \subtype t$,
+then $t \subtype t'$.
+\end{enumerate}
+A {\em complete set of minimal upper bounds}
+for two types $t_1$ and $t_2$
+is a set $\CSMUB(t_1,t_2)$ such that
+\begin{enumerate}
+\item
+ all $t \in \CSMUB(t_1,t_2)$
+are minimal upper bounds of $t_1$ and $t_2$, and
+\item for every type $t'$
+which is a common upper bound of $t_1$ and $t_2$ there is
+a $t \in \CSMUB(t_1,t_2)$ such that $t \subtype t'$.
+\end{enumerate}}
+
+If two types $t_1$ and $t_2$ have no minimal
+upper bound then the complete sets of minimal upper bounds
+are all empty. In this case we will write
+$\CSMUB(t_1,t_2)=\emptyset$.
+We will write $|\CSMUB(t_1,t_2)|$ to denote the
+smallest cardinality
+of a complete set of minimal upper bounds of $t_1$ and $t_2$.
+
+If the partial order induced by $\subtype$ is
+a quasi-lattice then
+$|\CSMUB(t_1,t_2)| \leq 1$ for all types $t_1$ and
+$t_2$.
+However, as we have seen in Sec.~\ref{secpropcoerpreord}
+this partial order will not be a quasi-lattice in general.
+
+In the following we will assume that for
+any two {\em base types} $t_1^{\rm b}$ and $t_2^{\rm b}$ a
+{\em finite}
+complete
+set of minimal upper bounds can be computed effectively,
+say by ${\sf CSMUBBT}(t_1^{\rm b},t_2^{\rm b})$.
+We will give an algorithm computing for any
+two types $t_1$ and $t_2$ a complete set of minimal
+upper bounds and will show that this set is finite.
+
+{\bf Theorem 9.}
+{\sl Assume that all coercions are coercions between base types, direct
+embeddings and structural coercions. Moreover, assume that for all
+type constructors $f$ there is at most one direct embedding position,
+i.\,e.\ $|{\cal D}_f| \leq 1$, and no antimonotonic coercions are
+present, i.\,e.\ ${\cal A}_f=\emptyset$, and for any base types
+$t_1^{\rm b}$ and $t_2^{\rm b}$ there is a finite complete set of
+minimal upper bounds with respect to the set of base types which can
+be effectively computed by a function ${\sf CSMUBBT}(t_1^{\rm
+b},t_2^{\rm b})$.}
+
+{\sl Then for any two types $t_1$ and $t_2$ there is a finite complete set
+of minimal upper bounds which can be effectively computed.}
+
+\begin{proof}
+We claim that algorithm ${\sf CSMUBGT}$
+(see Fig.~\ref{algCSMUBGT})
+ terminates
+for any input parameters $t_1$ and $t_2$ and
+computes a complete set of minimal upper bounds
+which is finite.
+
+
+\newsavebox{\algcsmubgt}
+\newsavebox{\algcsmubbt}
+\newsavebox{\algcsmuall}
+\sbox{\algcsmubgt}{
+\begin{minipage}[l]{0.94\textwidth}
+\begin{center}
+${\cal U} \leftarrow {\sf CSMUBGT}(t_1,t_2)$
+\end{center}
+[${\cal U}$ is a complete set of minimal upper bounds
+of two types $t_1$ and $t_2$. Requires that
+only direct embeddings and structural coercions are used,
+$|{\cal D}_f| \leq 1$
+and ${\cal A}_f = \emptyset$ for any type constructor
+$f$. Assumes that algorithm {\sf CSMUBBT} returns a finite
+set.]
+\begin{deflist}{(1)}
+\item[(1)] [$t_1$ and $t_2$ base types.]
+{\bf if} $\com(t_1)=1$ and $\com(t_2)=1$
+ {\bf then }
+ $\aldesbegbr$
+ ${\cal U} \leftarrow {\sf CSMUBBT}(t_1,t_2)$;
+ {\bf return}
+ $\aldesendbr$.
+\item[(2)] [Ensure that $\com(t_1) \leq \com(t_2)$.]
+ {\bf if} $\com(t_1) > \com(t_2)$ {\bf then}
+ $\aldesbegbr$
+ $h \leftarrow t_1$; $t_1 \leftarrow t_2$;
+ $t_2 \leftarrow h$
+ $\aldesendbr$.
+\item[(3)] [$t_1$ a base type.]
+ {\bf if} $\com(t_1) = 1$ {\bf then}
+ $\aldesbegbr$ \\
+ let $t_2 = f(t_2^1, \ldots, t_2^n)$; \\
+ {\bf if} $|{\cal D}_f| = 0$ {\bf then}
+ $\aldesbegbr$
+ ${\cal U} \leftarrow \emptyset$;
+ {\bf return}$\aldesendbr$; \\
+ let ${\cal D}_f = \{ i \}$; \\
+ ${\cal U}' \leftarrow {\sf CSMUBGT}(t_1,t_2^i)$;
+ \begin{deflist}{(3.1)}
+ \item[(3.1)] {\bf if} ${\cal U}' = \emptyset$
+ {\bf then} $\aldesbegbr$
+ ${\cal U} \leftarrow \emptyset$;
+ {\bf return}$\aldesendbr$;
+ \item[(3.2)] {\bf if} ${\cal U}' \neq \emptyset$
+ {\bf then} $\aldesbegbr$
+ {\bf if} $i \in {\cal M}_f$ {\bf then}
+ $\aldesbegbr$
+ ${\cal U} \leftarrow \emptyset$; \\
+ {\bf for} $t' \in {\cal U}'$
+ {\bf do} \\
+ ${\cal U} \leftarrow {\cal U} \cup
+ \{ f(t_2^1, \ldots, t_2^{i-1},
+ t', t_2^{i+1},\ldots, t_2^n) \}$
+ $\aldesendbr$; \\
+ {\bf if} $i \notin {\cal M}_f$ {\bf then}
+ $\aldesbegbr$
+ {\bf if} $t_2^i \in {\cal U}'$
+ {\bf then} ${\cal U} \leftarrow \{t_2\}$ \\
+ {\bf else} ${\cal U} \leftarrow \emptyset$$\aldesendbr$
+ {\bf return}
+ $\aldesendbr$ $\aldesendbr$.
+ \end{deflist}
+\item[(4)] [General case.] let $t_1 = g(t_1^1, \ldots, t_1^m)$;
+ let $t_2 = f(t_2^1, \ldots, t_2^n)$;
+ ${\cal U} \leftarrow \emptyset$.
+\item[(5)] [Structural coercions.] {\bf if} $f=g$ {\bf then}
+ $\aldesbegbr$ \\
+ {\bf for} $i \in {\cal M}_f$ {\bf do}
+ ${\cal U}_i \leftarrow {\sf CSMUBGT}(t_1^i,t_2^i)$; \\
+ let ${\cal M}_f = \{ j_1, \ldots, j_l \}$; \\
+ {\bf if} $t_1^k = t_2^k$
+ for all $k \in \{1, \ldots, n \} - {\cal M}_f$
+ {\bf then} $\aldesbegbr$ \\
+ {\bf for} $(t'_{j_1}, \ldots, t'_{j_l}) \in
+ {\cal U}_{j_1} \times \cdots \times {\cal U}_{j_l}$
+ {\bf do}
+ $\aldesbegbr$ \\
+ {\bf for} $k \in \{1, \ldots, n \} - {\cal M}_f$
+ {\bf do} $t'_k \leftarrow t_1^k$; \\
+ ${\cal U} \leftarrow {\cal U} \cup
+ \{ f(t'_1, \ldots, t'_n) \}$
+ $\aldesendbr$ $\aldesendbr$ $\aldesendbr$.
+\item[(6)] [Direct embeddings in $g$.]
+ {\bf if }
+ $|{\cal D}_g| = 1$ {\bf then} $\aldesbegbr$ \\
+let ${\cal D}_g = \{ i \}$;
+ ${\cal U}' \leftarrow {\sf CSMUBGT}(t_1^i,t_2)$; \\
+ {\bf if} ${\cal U}' \neq \emptyset$
+ {\bf then} $\aldesbegbr$
+ {\bf if} $i \in {\cal M}_g$ {\bf then}
+ $\aldesbegbr$
+ {\bf for} $t' \in {\cal U}'$
+ {\bf do}
+ ${\cal U} \leftarrow {\cal U} \cup
+ \{ g(t_2^1, \ldots, t_2^{i-1},
+ t', t_2^{i+1},\ldots, t_2^m) \}$
+ $\aldesendbr$; \\
+ {\bf if} $i \notin {\cal M}_g$
+ and $t_1^i \in {\cal U}'$
+ {\bf then} ${\cal U} \leftarrow {\cal U} \cup \{t_1\}$
+ $\aldesendbr$ $\aldesendbr$.
+\item[(7)] [Direct embeddings in $f$.] {\bf if}
+ $|{\cal D}_f| = 1$ {\bf then} $\aldesbegbr$ \\
+let ${\cal D}_f = \{ i \}$;
+ ${\cal U}' \leftarrow {\sf CSMUBGT}(t_1,t_2^i)$; \\
+ {\bf if} ${\cal U}' \neq \emptyset$
+ {\bf then} $\aldesbegbr$
+ {\bf if} $i \in {\cal M}_f$ {\bf then}
+ $\aldesbegbr$
+ {\bf for} $t' \in {\cal U}'$
+ {\bf do}
+ ${\cal U} \leftarrow {\cal U} \cup
+ \{ f(t_2^1, \ldots, t_2^{i-1},
+ t', t_2^{i+1},\ldots, t_2^n) \}$
+ $\aldesendbr$; \\
+ {\bf if} $i \notin {\cal M}_f$ and
+ $t_2^i \in {\cal U}'$
+ {\bf then} ${\cal U} \leftarrow {\cal U} \cup \{t_2\}$
+ $\aldesendbr$ $\aldesendbr$.
+\end{deflist}
+\end{minipage}
+}
+\sbox{\algcsmuall}{
+\begin{minipage}[l]{0.96\textwidth}
+\begin{center}
+\usebox{\algcsmubgt}
+\end{center}
+\end{minipage}
+}
+\begin{figure}[tbhp]
+\hfil\fbox{\usebox{\algcsmuall}}
+\caption{An algorithm computing a complete set of
+minimal upper bounds}
+\label{algCSMUBGT}
+\end{figure}
+
+
+
+We will prove this claim by
+induction on the complexity of $t_1$
+and $t_2$ along the steps of the algorithm.
+
+If $t_1$ and $t_2$ are base types, then
+${\sf CSMUBBT}(t_1,t_2)$ is also
+ a complete set of minimal
+ upper bounds of $t_1$ and
+$t_2$ with respect to
+all types. This subclaim can be proved by
+induction on the complexity of possible common upper bounds
+of $t_1$ and $t_2$ using the assumption that for
+any type constructor $f$ we
+have $|{\cal D}_f| \leq 1$.\footnote{Without this assumption
+the subclaim is false in general.}
+
+So the algorithm terminates for the case of base types
+and
+returns a finite set which is a complete set of minimal upper
+bounds for $t_1$ and $t_2$.
+
+The algorithm will terminate for all other $t_1$
+and $t_2$, too.
+Recursive calls of the algorithm are done on arguments
+of which at least one has a strictly smaller complexity.
+Since any of the recursive calls returns a finite set,
+only finitely many iterations have to be performed
+by the algorithm and the returned set is finite.
+
+Since only direct embeddings and
+monotonic structural coercions are present,
+any element of ${\cal U}$ is a minimal upper
+bound of $t_1$ and $t_2$.
+The set ${\cal U}$ will be a complete set of
+minimal upper bounds, because
+$|{\cal D}_f| \leq 1$ for any type constructor
+and all other possibilities of minimal upper bounds
+for $t_1$ and $t_2$ are covered by the algorithm.
+
+
+Since ${\sf CSMUBGT}$ returns a finite set
+of types, the existence of a finite set of minimal
+upper bounds follows from the correctness of
+the algorithm.
+\qed
+\end{proof}
+
+\begin{remark}
+Since algorithm ${\sf CSMUBGT}$ uses the type constructors
+given by its arguments and does not have to perform
+a search on all type constructors,
+it is not necessary that the signature is finite.
+It is only necessary that there is an effective algorithm
+which computes for any type constructor $f$ the sets
+${\cal D}_f$ and ${\cal M}_f$, and that
+the conditions imposed on algorithm ${\sf CSMUBBT}$
+are fulfilled.\footnote{If the signature is finite,
+these conditions will always be fulfilled if the coercions
+between the base types are effectively given.}
+
+An example of an infinite signature
+with such properties is
+a finite signature extended with a
+type constructor $\tf{M}_{m,n}$ for any
+$m,n \in \NN$
+with the intended meaning of building the $m\times n$-matrices
+over commutative rings.
+It is natural to define ${\cal M}_{\tf{M}_{m,n}}=\{1\}$ for all
+$m,n \in \NN$ and to have ${\cal D}_{\tf{M}_{m,n}}=\emptyset$ for
+$m\neq n$ and ${\cal D}_{\tf{M}_{n,n}}=\{1\}$ for any $n \in \NN$.
+\end{remark}
+
+\subsubsection{Complexity of Type Inference}
+\label{secomtycoer}
+
+In \cite{Wand89} and \cite{Linc92}
+the complexity of type inference for expressions of the $\lambda$-calculus
+which are typed by allowing various possibilities of coercions are
+investigated.
+
+In \cite{Linc92} the problem is shown to be NP-hard if the order given
+by the coercions is arbitrary but fixed by reducing the following
+problem on partial orders called {\sc Pol-Sat} to it.
+\begin{quote}
+Given a partial order $\langle P, \leq \rangle$ and a set of
+inequalities $I$ of the form $p \leq w$, $w \leq w'$, where $w$ and
+$w'$ are variables, and $p$ is a constant drawn from $P$, is there an
+assignment from variables to members of $P$ that satisfies all
+inequalities of $I$?
+\end{quote}
+{\sc Pol-Sat} is an NP-complete problem. It is shown to be NP-hard by
+reducing the {\sf 3-SAT}-problem to it.\footnote{A proof that
+{\sf 3-SAT} is NP-complete can be found e.\,g.\ in
+\cite[p.~347]{Davi94}.} However, if only lattices are
+allowed as partial orders in {\sc Pol-Sat} then the problem is
+decidable in linear time.
+
+A quite similar problem on partial orders, called {\sc Po-Sat} is
+introduced in \cite{Wand89}, which is reduced in polynomial time to a
+type inference problem using polymorphic functions. The problem {\sc
+ Po-Sat} is proven to be NP-complete for arbitrary partial orders but
+to be solvable in polynomial time if the partial orders are restricted
+to finite quasi-lattices.
+
+A quite systematic study of the complexity of decision problems for
+various partial orders which might be relevant for type inference is
+given in \cite{Tiur92}.
+
+\section{Other Typing Constructs}
+\label{chapothtyc}
+
+\subsection{Partial Functions}
+\label{secpartfunc}
+
+Many functions arising in the area of computer algebra are only
+partially defined. Some basic examples are
+
+\begin{enumerate}
+\item division in a field, which is defined for non-zero elements only;
+\item matrices over fields have inverses only if they are regular;
+\item the square-root over the reals exists for non-negative
+values only.
+\end{enumerate}
+
+We could make partial functions total by introducing new types --- the
+type of elements, on which the function is defined.
+
+The following examples, which are taken from \cite{Farm90},
+show that there are severe problems if we were to take this solution.
+
+Let $f$ be the binary functions over the reals defined by
+$$f(x,y) = \sqrt{x-y}.$$
+The function $f$ cannot be represented as a {\em binary\/} total-function
+in a many-sorted algebra since the domain of $f$ is not a set of the
+form $D_x \times D_y$, where $D_x$ and $D_y$ are
+subsets of the real numbers.
+
+It makes good sense to view division in a field as a partial function
+with the second argument having the type of the field. If in the case
+of the rationals we were to restrict the second argument to a type
+``non-zero rationals'', we would have made this function total.
+However, this solution has a severe drawback. A term such as
+$1/(2-1)$ is no longer well-formed, since ``$-$'' is a function into
+the rationals and not into the non-zero rationals only.
+
+The usual solution which is taken in connection with
+many-sorted and order-sorted algebras uses the ``opposite'' way.
+
+New elements --- ``error elements'' --- are introduced
+and new types are built by adjoining these error elements.
+A partial function is made total by setting the value
+of the function to be an error element if it is undefined before,
+see e.\,g.\ \cite{Smol89} for a more detailed description
+of this construction.
+
+This construction is also used in universal algebra in order
+to embed a partial algebra in a full algebra,
+see e.\,g.\ \cite[p.~79]{Grae79}.
+
+In the area of computer algebra this approach is taken in
+the computer algebra system {\sf Axiom}.
+
+The disadvantage of this approach is that we lose information.
+If we consider terms built out of partial functions and
+total functions, we have to repeat the construction.
+Since the range of the partial function has increased,
+a previously total function has become partial, since it is
+not defined on the error value.
+
+In the general framework of many-sorted or order-sorted computations,
+it might be difficult to regain the lost information. There are
+important examples, where the set of elements on which a partial
+function is defined is only recursively enumerable but not recursive
+(see e.\,g.\ \cite[p.~342]{Smol89} for an example).
+
+In connection with a computer algebra system, a better solution should
+be possible. In most cases, the set of elements a partial function is
+defined on can easily be decided; in our examples a simple test for
+being non-zero, non-negative or calculating a determinant would have
+been sufficient.
+
+Hence, in these cases it is decidable whether a
+{\em ground term\/} is well formed, i.\,e.\ has an error value or not.
+
+Finding conditions and algorithms which tell the (minimal) type of an
+arbitrary term is an interesting problem, whose solution would be of
+practical significance.
+
+\subsubsection{Retractions}
+\label{specialize}
+
+The sum of two polynomials is in general again a polynomial.
+However, if we add the polynomials $(-x+5)$ and $(x+2)$, we obtain
+the constant polynomial $3$ as a result.
+For future computations it would be useful if we {\em retract}
+the type of the result from \tf{integral polynomial} to
+\tf{integer}.
+
+Since retractions are partially defined implicit conversion functions,
+the general framework developed for other kinds of partial functions
+also applies to retractions.
+
+\subsection{Types Depending on Elements}
+\label{chtydeel}
+
+In this section we will discuss typing constructs which correspond to
+the case of elements as parameters to domain constructors in {\sf
+ Axiom}. We will use the term ``types depending on elements'' to
+describe these types, because it seems to be more or less standard for
+type theories including such constructs.
+
+There are some important examples of data structures whose type
+depends on a non-negative integer.
+\begin{itemize}
+\item Elements of $\ZZ_m$.
+\item Vectors of dimension $n$.
+\item The $m \times n$-matrices.
+\end{itemize}
+However, the elements a type can depend on are not restricted to
+integers.
+
+An algebraic number $\alpha$ over $\QQ$ is usually represented by its
+minimal polynomial over the rationals. Thus, an element of the field
+$\QQ[\alpha]$ has a type depending on some polynomial over the
+rationals.
+
+An example of a type which depends on a matrix (namely the matrix
+defining a quadratic form) is the one which is built by the domain
+constructor $\tf{CliffordAlgebra}$ (see \cite[Sec.~9.9]{Jenk92}).
+
+In group theory programs, very often a group is represented with
+respect to its generators, cf.\ \cite{GAPx17}. So the concept of
+types depending on elements is a possibility to treat certain
+structures which are treated as objects of a computation in a certain
+context as {\em types} in another one (cf.\ Sec.~\ref{sgroupth}).
+
+Some of the examples given above could be reformulated such that the
+concept of types depending on elements is no longer necessary in order
+to describe them. So it might be sufficient to have only a type of
+matrices of arbitrary dimension (over some ring) in the system and not
+a type of $m \times n$-matrices. Then matrix-multiplication or even
+addition of two matrices would be partial functions only. A treatment
+of partial functions (cf.\ Sec.~\ref{secpartfunc}) would be sufficient
+and the additional concept of types depending on elements could be
+avoided.
+
+However, for the case of $\ZZ_m$ it seems to be necessary to have for
+any $m \in \NN$ also a type corresponding to $\ZZ_m$ in a system which
+also allows the possibility to have computations on the integer $m$.
+
+So the concept of types depending on elements is important for many
+computer algebra applications. Unfortunately, as we will show below
+it is not possible to have type-safe compile-time type-checking.
+
+\subsubsection{Undecidability of Type Checking}
+
+\label{undetychtydeel}
+
+{\bf Lemma 8.}
+\label{lemundetychtydeel}
+{\sl Let ${\cal R}$ be the class of unary recursive functions.
+Then the following questions are undecidable:
+\begin{enumerate}
+\item For $f \in {\cal R}$, is $f(x)=n$
+for some fixed $n \in \NN$ and for all $x$?
+\item For $f \in {\cal R}$, is $f(x)$ a prime number for all $x$?
+\item For $f \in {\cal R}$, is $\gcd(f(x),n)=1$ for some fixed $n \in \NN$
+and for all $x$?
+\end{enumerate}}
+
+\begin{proof}
+All of the questions above are equal to determining the membership of
+$f$ in certain classes of partial recursive functions, which are all
+non-trivial. So the lemma is proved by applying Rice's Theorem (see
+e.\,g.\ \cite[p.~150]{Odif92}). \qed
+\end{proof}
+
+Assume that the language is universal, i.\,e.\ every partial recursive
+function can be computed in the language. Assume that there is a type
+corresponding to $\NN$ present in the language and that indeed every
+unary recursive function can be represented in the system as one
+having type $\NN \longrightarrow \NN$. Moreover, assume that there is
+a type corresponding to $\ZZ_m$ for any $m \in \NN$.
+
+Let $n \in \NN$ and let $f: \NN \longrightarrow \NN$ be a unary
+recursive function. By Lemma 8 it cannot be
+decided by a compiler, whether $\ZZ_{f(x)}$ and $\ZZ_n$ are equal.
+Thus having $a \in \ZZ_{f(x)}$ and $b \in \ZZ_n$ and having a
+polymorphic operation ${\tt op}$ with type
+$$\forall t \, . \, t \times t \longrightarrow \tf{Boolean}$$
+like the check for equality it cannot be decided at compile time
+whether
+$${\tt op}(a,b)$$
+is well typed.
+
+Determining whether $\ZZ_{f(x)}$ is a field, i.\,e.\ whether $f(x)$ is
+prime is also not possible at compile time. So it cannot be decided
+whether computations requiring that $\ZZ_{f(x)}$ is a field are legal.
+
+Since it cannot be decided by the compiler whether $\gcd(f(x),n)=1$ it
+is also impossible to decide whether the lifting connected with the
+Chinese remainder theorem can be applied to an element of $\ZZ_{f(x)}$
+and to one of $\ZZ_n$ giving one of $\ZZ_{f(x) \cdot n}$.
+
+In the following we will show that it is necessary to allow such
+run-time computations of elements a type depends on for many important
+applications in computer algebra.
+
+\subsubsection{Necessity of Run-Time Computations
+of Elements Types Depend on}
+
+Frequently, computations in $\ZZ_m$\footnote{Or in the ring of
+ polynomials over $\ZZ_m$, etc. In our framework these structures
+ can be all expressed as types having $\ZZ_m$ substituted for a type
+ variable.} are done in the context of computer algebra because of
+the following observation:
+
+If one wants to have the solution for a problem over the integers,
+then it is often possible to compute a $b \in \NN$ (a ``bound'') such
+that for all $n \geq b$ the result of the computation in $\ZZ_n$ can
+easily be extended to a solution for the problem over the
+integers.\footnote{Many books on computer algebra can serve as
+references, e.\,g.\ \cite{Buch82} --- especially \cite{Laue82} or
+\cite{Kalt83a} --- or \cite{Dave88}, \cite{Lips81}, \cite{Gedd92}, and
+also \cite{Knut71}.}
+
+Very often, these computations are not done directly in $\ZZ_b$, but
+in $\ZZ_{p_1}, \ldots, \ZZ_{p_h}$ for primes $p_1, \ldots, p_h$. The
+results are then ``lifted'' either to $\ZZ_{p_1 \cdots p_h}$ by an
+application of the Chinese remainder theorem or to $\ZZ_{p^l}$ by a
+Hensel lifting. The choice of $p_1, \ldots, p_h$ resp.\ of $p$ and
+$l$ are such that $p_1 \cdots p_h \geq b$ resp.\ $p^l \geq b$.
+
+However, the class of algorithms which is used to compute the bounds
+can be fairly complicated. Technically speaking, if $f(x)$ and $g(x)$
+are two functions that can be computed by the class of algorithms used
+for the bound computations, then it is undecidable whether
+$$f(x) \equiv g(x) \quad\quad\forall x.$$
+Let us now assume that we could restrict the occurring types to the
+ones corresponding to $\ZZ_{p_1 \cdots p_k}$, where $\{p_1, p_2, p_3,
+\ldots \}$ is the set of prime numbers. However, it is undecidable
+whether $p_1 \cdots p_k = p_1 \cdots p_{k'}$, if $k$ and $k'$ are
+minimal such that $p_1 \cdots p_k \geq f(x)$ and $p_1 \cdots p_{k'}
+\geq g(x)$. So a compiler cannot decide whether a statement involving
+an element of $\ZZ_{p_1 \cdots p_k}$ and one of $\ZZ_{p_1 \cdots
+p_{k'}}$ requiring both to have the same type\footnote{Simple
+operations such as a test for equality or addition can serve as
+examples.} will lead to a typing error or not.
+
+\subsubsection{Calculi Dealing with Types Depending on Elements}
+
+The results of this section show that it is useful to distinguish
+between domains and elements as parameters of domain constructors.
+
+Having only type classes as additional typing construct a static
+typechecking is possible in principle in the former case. In the
+latter case it becomes undecidable, where we have argued that this
+undecidability results are relevant for many examples occurring in
+practical computer algebra applications.
+
+For a user interface it is usually sufficient to perform type
+inference on expressions which do not allow recursion and which do not
+form a Turing-complete language for computations on elements types
+depend on.
+
+So the problems which yield that the type inference problem and even
+the type checking problem is undecidable in the case of a computer
+algebra language do not apply to the case of a user interface of a
+computer algebra system.
+
+Since the type of an element another type depends on can nevertheless
+be quite complicated (see the examples given above) it seems to be
+useful to have some sophisticated techniques available also for this
+case.
+
+During the last years several general type theories having the concept
+``types depending on elements'' have been developed. Some are
+Martin-L\"of's Type Theory \cite{Mart80}, and the {\em Calculus of
+ Constructions} of Coquand and Huet \cite{Coqu86}. They have been
+explored extensively, especially as ``logical frameworks''
+\cite{Huet91}. For this purpose several subcalculi and variations
+such as LF \cite{Harp93}, or Elf \cite{Pfen89}, \cite{Pfen91},
+\cite{Pfen92} have been defined. Some extensions of unification
+algorithms to these type theories have been given in \cite{Elli89},
+\cite{Pfen91a}. For the purpose of computer algebra probably
+another variant of these theories will be more suited than the
+existing ones. Nevertheless, it seems to be very likely that some of the
+obtained results are applicable to the type inference problem for a
+user interface of a computer algebra system.
+
\chapter{Finite Fields in Axiom (Grabmeier/Scheerhorn)}
This was written by Johannes Grabmeier and Alfred Scheerhorn.
diff --git a/books/bookvolbib.pamphlet b/books/bookvolbib.pamphlet
index 75cf361..26cf53d 100644
--- a/books/bookvolbib.pamphlet
+++ b/books/bookvolbib.pamphlet
@@ -521,6 +521,61 @@ paragraph for those unfamiliar with the terms.
\section{Algebra Documentation References}
+\subsection{A} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Abdali, S. Kamal}
+\index{Cherry, Guy W.}
+\index{Soiffer, Neil}
+\begin{chunk}{axiom.bib}
+@inproceedings{Abda86,
+ author = "Abdali, S. Kamal and Cherry, Guy W. and Soiffer, Neil",
+ title = "A Smalltalk System for Algebraic Manipulation",
+ booktitle = "OOPSLA 86",
+ pages = "277-293",
+ year = "1986",
+ abstract =
+ "This paper describes the design of an algebra system Views
+ implemented in Smalltalk. Views contains facilities for dynamic
+ creation and manipulation of computational domains, for viewing these
+ domains as various categories such as groups, rings, or fields, and
+ for expressing algorithms generically at the level of categories. The
+ design of Views has resulted in the addition of some new abstractions
+ to Smalltalk that are quite useful in their own right. Parameterized
+ classes provide a means for run-time creation of new classes that
+ exhibit generally very similar behavior, differing only in minor ways
+ that can be described by different instantiations of certain
+ parameters. Categories allow the abstraction of the common behavior of
+ classes that derives from the class objects and operations satisfying
+ certain laws independently of the implementation of those objects and
+ operations. Views allow the run-time association of classes with
+ categories (and of categories with other categories), facilitating the
+ use of code written for categories with quite different
+ interpretations of operations. Together, categories and views provide
+ an additional mechanism for code sharing that is richer than both
+ single and multiple inheritance. The paper gives algebraic as well as
+ non-algebraic examples of the above-mentioned features.",
+ paper = "Abda86.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Aho, Alfred V.}
+\index{Sethi, Ravi}
+\index{Ullman, Jeffrey D.}
+\begin{chunk}{axiom.bib}
+@book{Ahox86,
+ author = "Aho, Alfred V. and Sethi, Ravi and Ullman, Jeffrey D.",
+ title = "Compilers: Principles, Techniques, and Tools",
+ year = "1986",
+ publisher = "Addison-Wesley",
+ isbn = "978-0201100884"
+}
+
+\end{chunk}
+
+\subsection{B} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Baker, Martin}
\begin{chunk}{axiom.bib}
@misc{Bake16b,
@@ -543,6 +598,353 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Barendregt, H. P.}
+\begin{chunk}{axiom.bib}
+@book{Bare84,
+ author = "Barendregt, H. P.",
+ title = "The Lambda Calculus: Its Syntax and Semantics",
+ publisher = "Elsevier Science",
+ year = "1984"
+}
+
+\end{chunk}
+
+\index{Baumgartner, Gerald}
+\index{Stansifer, Ryan D.}
+\begin{chunk}{axiom.bib}
+@techreport{Baum95,
+ author = "Baumgartner, Gerald and Stansifer, Ryan D.",
+ title = "A Proposal to Study Type Systems for Computer Algebra",
+ type = "technical report",
+ institution = "RISC-LINZ",
+  number = "90-07.0",
+  year = "1990",
+ abstract =
+  "It is widely recognized that programming languages should offer
+  features to help structure programs. To achieve this goal, languages
+  like Ada, Modula-2, object-oriented languages, and functional
+  languages have been developed. The structuring techniques available
+  so far (like modules, classes, parametric polymorphism) are still not
+  enough or not appropriate for some application areas. In symbolic
+  computation, in particular computer algebra, several problems occur
+  that are difficult to handle with any existing programming
+  language. Indeed, nearly all available computer algebra systems suffer
+  from the fact that the underlying programming language imposes too
+  many restrictions.
+
+ We propose to develop a language that combines the essential features
+ from functional languages, object-oriented languages, and computer
+ algebra systems in a semantically clean manner. Although intended for
+ use in symbolic computation, this language should prove interesting as
+ a general purpose programming language. The main innovation will be
+ the application of sophisticated type systems to the needs of computer
+ algebra systems. We will demonstrate the capabilities of the language
+  by using it to implement a small computer algebra library. This
+  implementation will be compared against a straightforward Lisp
+ implementation and against existing computer algebra systems. Our
+ development should have an impact both on the programming languages
+ world and on the computer algebra world.",
+ paper = "Baum95.pdf"
+}
+
+\end{chunk}
+
+\index{Berger, Emery}
+\begin{chunk}{axiom.bib}
+@techreport{Berg92,
+ author = "Berger, Emery",
+ title = "FP + OOP = Haskell",
+ institution = "University of Texas",
+ number = "TR-92-30",
+ abstract =
+ "The programming language Haskell adds object-oriented functionality
+ (using a concept known as type classes) to a pure functional
+ programming framework. This paper describes these extensions and
+ analyzes its accomplishments as well as some problems."
+}
+
+\end{chunk}
+
+\index{Birtwistle, Graham M.}
+\begin{chunk}{axiom.bib}
+@book{Birt80,
+ author = "Birtwistle, Graham M.",
+ title = "Simula Begin",
+ year = "1980",
+ publisher = "Chartwell-Bratt",
+ isbn = "9780862380090"
+}
+
+\end{chunk}
+
+\index{Breazu-Tannen, Val}
+\index{Coquand, Thierry}
+\index{Gunter, Carl A.}
+\index{Scedrov, Andre}
+\begin{chunk}{axiom.bib}
+@inproceedings{Brea89,
+ author = "Breazu-Tannen, Val and Coquand, Thierry and Gunter, Carl A. and
+ Scedrov, Andre",
+ title = "Inheritance and Explicit Coercion",
+ booktitle = "Logic in Computer Science",
+ year = "1989",
+ isbn = "0-8186-1954-6",
+ abstract =
+ "A method is presented for providing semantic interpretations for
+ languages which feature inheritance in the framework of statically
+ checked, rich type disciplines. The approach is illustrated by an
+ extension of the language Fun of L. Cardelli and P. Wegner (1985),
+ which is interpreted via a translation into an extended polymorphic
+ lambda calculus. The approach interprets inheritances in Fun as
+ coercion functions already definable in the target of the
+ translation. Existing techniques in the theory of semantic domains can
+ then be used to interpret the extended polymorphic lambda calculus,
+ thus providing many models for the original language. The method
+ allows the simultaneous modeling of parametric polymorphism, recursive
+ types, and inheritance, which has been regarded as problematic because
+ of the seemingly contradictory characteristics of inheritance and type
+ recursion on higher types. The main difficulty in providing
+ interpretations for explicit type disciplines featuring inheritance is
+ identified. Since interpretations follow the type-checking
+ derivations, coherence theorems are required, and the authors prove
+ them for their semantic method.",
+ paper = "Brea89.pdf"
+}
+
+\end{chunk}
+
+\index{Breazu-Tannen, Val}
+\index{Coquand, Thierry}
+\index{Gunter, Carl A.}
+\index{Scedrov, Andre}
+\begin{chunk}{axiom.bib}
+@article{Brea91,
+ author = "Breazu-Tannen, Val and Coquand, Thierry and Gunter, Carl A. and
+ Scedrov, Andre",
+ title = "Inheritance as Implicit Coercion",
+  journal = "Information and Computation",
+  volume = "93",
+ number = "1",
+ year = "1991",
+ pages = "172-221",
+ abstract =
+ "We present a method for providing semantic interpretations for
+ languages with a type system featuring inheritance polymorphism. Our
+ approach is illustrated on an extension of the language Fun of
+ Cardelli and Wegner, which we interpret via a translation into an
+ extended polymorphic lambda calculus. Our goal is to interpret
+ inheritances in Fun via coercion functions which are definable in the
+ target of the translation. Existing techniques in the theory of
+ semantic domains can be then used to interpret the extended
+ polymorphic lambda calculus, thus providing many models for the
+ original language. This technique makes it possible to model a rich
+ type discipline which includes parametric polymorphism and recursive
+ types as well as inheritance. A central difficulty in providing
+ interpretations for explicit type disciplines featuring inheritance in
+ the sense discussed in this paper arises from the fact that programs
+ can type-check in more than one way. Since interpretations follow the
+ type-checking derivations, coherence theorems are required: that is,
+ one must prove that the meaning of a program does not depend on the
+ way it was type-checked. Proofs of such theorems for our proposed
+ interpretation are the basic technical results of this
+ paper. Interestingly, proving coherence in the presence of recursive
+ types, variants, and abstract types forced us to reexamine fundamental
+ equational properties that arise in proof theory (in the form of
+ commutative reductions) and domain theory (in the form of strict
+ vs. non-strict functions).",
+ paper = "Brea91.pdf"
+}
+
+\end{chunk}
+
+\index{Bruce, Kim B.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Bruc93,
+ author = "Bruce, Kim B.",
+ title = "Safe type checking in a statically-typed object-oriented
+ programming language",
+ booktitle = "POPL 93",
+ year = "1993",
+ isbn = "0-89791-560-7",
+ pages = "285-298",
+ abstract =
+ " In this paper we introduce a statically-typed, functional,
+ object-oriented programming language, TOOPL, which supports classes,
+ objects, methods, instance variable, subtypes, and inheritance. It has
+ proved to be surprisingly difficult to design statically-typed
+ object-oriented languages which are nearly as expressive as Smalltalk
+ and yet have no holes in their typing systems. A particular problem
+ with statically type checking object-oriented languages is determining
+ whether a method provided in a superclass will continue to type check
+ when inherited in a subclass. This program is solved in our language
+ by providing type checking rules which guarantee that a method which
+ type checks as part of a class will type check correctly in all legal
+ subclasses in which it is inherited. This feature enables library
+ providers to provide only the interfaces of classes with executables
+ and still allow users to safely create subclasses. The design of TOOPL
+ has been guided by an analysis of the semantics of the language, which
+ is given in terms of a sufficiently rich model of the F-bounded
+ second-order lambda calculus. This semantics supported the language
+ design by providing a means of proving that the type-checking rules
+ for the language are sound, ensuring that well-typed terms produce
+ objects of the appropriate type. In particular, in a well-typed
+ program it is impossible to send a message to an object which lacks a
+ corresponding method.",
+ paper = "Bruc93.pdf"
+}
+
+\end{chunk}
+
+\index{Breazu-Tannen, Val}
+\index{Gallier, Jean}
+\begin{chunk}{axiom.bib}
+@inproceedings{Brea89a,
+  author = "Breazu-Tannen, Val and Gallier, Jean",
+  title = "Polymorphic Rewriting Conserves Algebraic Strong Normalization
+ and Confluence",
+ booktitle = "Automata, Languages and Programming",
+ pages = "137-150",
+ year = "1989",
+ abstract =
+ "We study combinations of many-sorted algebraic term rewriting systems
+ and polymorphic lambda term rewriting. Algebraic and lambda terms are
+ mixed by adding the symbols of the algebraic signature to the
+ polymorphic lambda calculus, as higher-order constants.
+
+ We show that if a many-sorted algebraic rewrite system R is strongly
+ normalizing (terminating, noetherian), then
+  $R+\beta+\eta+type-\beta+type-\eta$ rewriting of mixed terms is also
+  strongly normalizing. We obtain this result using a technique which
+  generalizes Girard's ``{candidats de r\'eductibilit\'e}'', introduced in
+ the original proof of strong normalization for the polymorphic lambda
+ calculus.
+
+ We also show that if a many-sorted algebraic rewrite system $R$ has
+ the Church-Rosser property (is confluent), then
+  $R+\beta+type-\beta+type-\eta$ rewriting of mixed terms has the
+ Church-Rosser property too. Combining the two results, we conclude
+ that if $R$ is canonical (complete) on algebraic terms, then
+  $R+\beta+type-\beta+type-\eta$ is canonical on mixed terms.
+
+  $\eta$ reduction does not commute with algebraic reduction, in general.
+  However, using long $\eta$-normal forms, we show that if $R$ is canonical
+  then $R+\beta+type-\beta+type-\eta$ convertibility is still decidable.",
+  paper = "Brea89a.pdf"
+}
+
+\end{chunk}
+
+\index{Buchberger, Bruno}
+\index{Collins, George Edwin}
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@book{Buch82,
+ author = "Buchberger, Bruno and Collins, George Edwin and Loos, Rudiger",
+ title = "Computer Algebra: Symbolic and Algebraic Computation",
+ publisher = "Springer",
+  isbn = "978-3-211-81684-4",
+  year = "1982",
+ paper = "Buch82.pdf"
+}
+
+\end{chunk}
+
+\index{Buchberger, Bruno}
+\index{Collins, George E.}
+\index{Encarnacion, Mark J.}
+\index{Hong, Hoon}
+\index{Johnson, Jeremy R.}
+\index{Krandick, Werner}
+\index{Loos, Rudiger}
+\index{Mandache, Ana M.}
+\index{Neubacher, Andreas}
+\index{Vielhaber, Herbert}
+\begin{chunk}{axiom.bib}
+@techreport{Buch93,
+ author = "Buchberger, Bruno and Collins, George E. and Encarnacion, Mark J.
+ and Hong, Hoon and Johnson, Jeremy R. and Krandick, Werner and
+ Loos, Rudiger and Mandache, Ana M. and Neubacher, Andreas and
+ Vielhaber, Herbert",
+ title = "SACLIB 1.1 User's Guide",
+ year = "1993",
+ institution = "Kurt Godel Institute",
+ abstract =
+ "This paper lists most of the algorithms provided by SACLIB and shows
+ how to call them from C. There is also a brief explanation of the
+ inner workings of the list processing and garbage collection
+ facilities of SACLIB",
+ paper = "Buch93.pdf"
+}
+
+\end{chunk}
+
+\index{Buendgen, R.}
+\index{Hagel, G.}
+\index{Loos, R.}
+\index{Seitz, S.}
+\index{Simon, G.}
+\index{Stuebner, R.}
+\index{Weber, A.}
+\begin{chunk}{axiom.bib}
+@article{Buen91,
+ author = "Buendgen, R. and Hagel, G. and Loos, R. and Seitz, S. and
+ Simon, G. and Stuebner, R. and Weber, A.",
+  title = "SAC-2 in ALDES -- Ein Werkzeug f{\"u}r die Algorithmenforschung",
+ journal = "MathPAD 1",
+ volume = "3",
+ year = "1991",
+ pages = "33-37"
+}
+
+\end{chunk}
+
+\index{Bundgen, Reinhard}
+\begin{chunk}{axiom.bib}
+@book{Bund93,
+ author = "Bundgen, Reinhard",
+ title = "The ReDuX System Documentation",
+ year = "1993",
+ publisher = "WSI"
+}
+
+\end{chunk}
+
+\index{Bundgen, Reinhard}
+\begin{chunk}{axiom.bib}
+@inproceedings{Bund93a,
+ author = "Bundgen, Reinhard",
+  title = {Reduce the Redex $\rightarrow$ ReDuX},
+ booktitle = "Proc. Rewriting Techniques and Applications 93",
+ year = "1993",
+ pages = "446-450",
+ publisher = "Springer-Verlag",
+ isbn = "3-540-56868-9"
+}
+
+\end{chunk}
+
+\index{Butler, Greg}
+\index{Cannon, John}
+\begin{chunk}{axiom.bib}
+@inproceedings{Butl90,
+ author = "Butler, Greg and Cannon, John",
+ title = "The Design of Cayley -- A Language for Modern Algebra",
+ booktitle = "DISCO 1990",
+ year = "1990",
+ pages = "10-19",
+ abstract =
+ "Established practice in the domain of modern algebra has shaped the
+ design of Cayley. The design has also been responsive to the needs of
+ its users. The requirements of the users include consistency with
+ common mathematical notation; appropriate data types such as sets,
+ sequences, mappings, algebraic structures and elements; efficiency;
+ extensibility; power of in-built functions and procedures for known
+ algorithms; and access to common examples of algebraic structures. We
+ discuss these influences on the design of Cayley's user language.",
+ paper = "Butl90.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\subsection{C} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Cantrill, Bryan}
\begin{chunk}{axiom.bib}
@misc{Cant16,
@@ -554,6 +956,503 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Cardelli, Luca}
+\index{Wegner, Peter}
+\begin{chunk}{axiom.bib}
+@article{Card85,
+ author = "Cardelli, Luca and Wegner, Peter",
+ title = "On Understanding Types, Data Abstraction, and Polymorphism",
+ journal = "ACM Computing Surveys",
+ volume = "17",
+ number = "4",
+ year = "1985",
+ pages = "471-523",
+ abstract =
+ "Our objective is to understand the notion of type in programming
+ languages, present a model of typed, polymorphic programming languages
+ that reflects recent research in type theory, and examine the
+ relevance of recent research to the design of practical programming
+ languages.
+
+ Object-oriented languages provide both a framework and a
+ motivation for exploring the interaction among the concepts of type,
+ data abstraction, and polymorphism, since they extend the notion of
+ type to data abstraction and since type inheritance is an important
+ form of polymorphism. We develop a $\lambda$-calculus-based model for type
+ systems that allows us to explore these interactions in a simple
+ setting, unencumbered by complexities of production programming
+ languages.
+
+ The evolution of languages from untyped universes to
+ monomorphic and then polymorphic type systems is reviewed. Mechanisms
+ for polymorphism such as overloading, coercion, subtyping, and
+ parameterization are examined. A unifying framework for polymorphic
+ type systems is developed in terms of the typed $\lambda$-calculus
+ augmented to include binding of types by quantification as well as
+ binding of values by abstraction.
+
+ The typed $\lambda$-calculus is
+ augmented by universal quantification to model generic functions with
+ type parameters, existential quantification and packaging (information
+ hiding) to model abstract data types, and bounded quantification to
+ model subtypes and type inheritance. In this way we obtain a simple
+ and precise characterization of a powerful type system that includes
+ abstract data types, parametric polymorphism, and multiple inheritance
+ in a single consistent framework. The mechanisms for type checking for
+ the augmented $\lambda$-calculus are discussed.
+
+ The augmented typed
+ $\lambda$-calculus is used as a programming language for a variety of
+ illustrative examples. We christen this language Fun because fun
+ instead of $\lambda$ is the functional abstraction keyword and because it
+ is pleasant to deal with.
+
+ Fun is mathematically simple and can serve
+ as a basis for the design and implementation of real programming
+ languages with type facilities that are more powerful and expressive
+ than those of existing programming languages. In particular, it
+ provides a basis for the design of strongly typed object-oriented
+ languages",
+ paper = "Card85.pdf"
+}
+
+\end{chunk}
+
+\index{Cardelli, Luca}
+\begin{chunk}{axiom.bib}
+@inproceedings{Card86,
+ author = "Cardelli, Luca",
+ title = "Typechecking Dependent Types and Subtypes",
+ link =
+ "\url{http://lucacardelli.name/Papers/Dependent%20Typechecking.US.pdf}",
+ year = "1986",
+ booktitle = "LNCS",
+ volume = "523",
+ pages = "45-57",
+ paper = "Card86.pdf"
+}
+
+\end{chunk}
+
+\index{Cardelli, Luca}
+\begin{chunk}{axiom.bib}
+@article{Card88,
+ author = "Cardelli, Luca",
+ title = "A Semantics of Multiple Inheritance",
+ journal = "Information and Computation",
+ volume = "76",
+ number = "2-3",
+ year = "1988",
+ pages = "138-164",
+ paper = "Card88.pdf"
+}
+
+\end{chunk}
+
+\index{Cardelli, Luca}
+\index{Longo, Giuseppe}
+\begin{chunk}{axiom.bib}
+@article{Card91,
+ author = "Cardelli, Luca and Longo, Giuseppe",
+ title = "A Semantic Basis for Quest",
+ journal = "J. of Functional Programming",
+ volume = "1",
+ number = "4",
+ pages = "417-458",
+ year = "1991",
+ abstract =
+ "Quest is a programming language based on impredicative type
+ quantifiers and subtyping within a three-level structure of kinds,
+ types and type operators, and values.
+
+ The semantics of Quest is rather challenging. In particular,
+ difficulties arise when we try to model simultaneously features such
+ as contravariant function spaces, record types, subtyping, recursive
+ types and fixpoints.
+
+ In this paper we describe in detail the type inference rules for
+ Quest, and give them meaning using a partial equivalence relation
+ model of types. Subtyping is interpreted as in previous work by Bruce
+ and Longo (1989), but the interpretation of some aspects -- namely
+ subsumption, power kinds, and record subtyping -- is novel. The latter
+ is based on a new encoding of record types.
+
+ We concentrate on modelling quantifiers and subtyping; recursion is
+ the subject of current work.",
+ paper = "Card91.pdf"
+}
+
+\end{chunk}
+
+\index{Chang, C.C.}
+\index{Keisler, H. Jerome}
+\begin{chunk}{axiom.bib}
+@book{Chan90,
+ author = "Chang, C.C. and Keisler, H. Jerome",
+ title = "Model Theory",
+ publisher = "North Holland",
+ year = "1990",
+ comment = "Studies in Logic and the Foundations of Mathematics",
+ volume = "73",
+ abstract =
+ "Since the second edition of this book (1977), Model Theory has
+ changed radically, and is now concerned with fields such as
+ classification (or stability) theory, nonstandard analysis,
+ model-theoretic algebra, recursive model theory, abstract model
+ theory, and model theories for a host of nonfirst order logics. Model
+ theoretic methods have also had a major impact on set theory,
+ recursion theory, and proof theory.
+
+ This new edition has been updated to take account of these changes,
+ while preserving its usefulness as a first textbook in model
+ theory. Whole new sections have been added, as well as new exercises
+ and references. A number of updates, improvements and corrections have
+ been made to the main text"
+}
+
+\end{chunk}
+
+\index{Char, Bruce}
+\index{Geddes, Keith O.}
+\index{Gonnet, Gaston H.}
+\index{Leong, Benton}
+\index{Monagan, Michael B.}
+\index{Watt, Stephen M.}
+\begin{chunk}{axiom.bib}
+@book{Char91,
+ author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
+ Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
+ title = "Maple V Language Reference Manual",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "978-0-387-94124-0"
+}
+
+\end{chunk}
+
+\index{Char, Bruce}
+\index{Geddes, Keith O.}
+\index{Gonnet, Gaston H.}
+\index{Leong, Benton}
+\index{Monagan, Michael B.}
+\index{Watt, Stephen M.}
+\begin{chunk}{axiom.bib}
+@book{Char91a,
+ author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
+ Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
+ title = "Maple V Library Reference Manual",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "978-1-4757-2133-1",
+ abstract =
+ "The design and implementation of the Maple system is an on-going
+ project of the Symbolic Computation Group at the University of
+ Waterloo in Ontario, Canada. This manual corresponds with version V
+ (roman numeral five) of the Maple system. The on-line help subsystem
+ can be invoked from within a Maple session to view documentation on
+ specific topics. In particular, the command ?updates points the user
+ to documentation updates for each new version of Maple. The Maple
+ project was first conceived in the autumn of 1980, growing out of
+ discussions on the state of symbolic computation at the University of
+ Waterloo. The authors wish to acknowledge many fruitful discussions
+ with colleagues at the University of Waterloo, particularly Morven
+ Gentleman, Michael Malcolm, and Frank Tompa. It was recognized in
+ these discussions that none of the locally-available systems for
+ symbolic computation provided the facilities that should be expected
+ for symbolic computation in modern computing environments. We
+ concluded that since the basic design decisions for the then-current
+ symbolic systems such as ALTRAN, CAMAL, REDUCE, and MACSYMA were based
+ on 1960's computing technology, it would be wise to design a new
+ system ``from scratch''. Thus we could take advantage of the software
+ engineering technology which had become available in recent years, as
+ well as drawing from the lessons of experience. Maple's basic features
+ (elementary data structures, Input/output, arithmetic with numbers,
+ and elementary simplification) are coded in a systems programming
+ language for efficiency."
+}
+
+\end{chunk}
+
+\index{Chen, Kung}
+\index{Hudak, Paul}
+\index{Odersky, Martin}
+\begin{chunk}{axiom.bib}
+@inproceedings{Chen92,
+ author = "Chen, Kung and Hudak, Paul and Odersky, Martin",
+ title = "Parametric Type Classes",
+ booktitle = "Proc. ACM Conf. on LISP and Functional Programming",
+ year = "1992",
+ pages = "170-181",
+ abstract =
+ "We propose a generalization to Haskell's type classes where a class
+ can have type parameters besides the placeholder variable. We show
+ that this generalization is essential to represent container classes
+ with overloaded data constructor and selector operations. We also show
+ that the resulting type system has principal types and present
+ unification and type reconstruction algorithms.",
+ paper = "Chen92.pdf"
+}
+
+\end{chunk}
+
+\index{Collins, George E.}
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@techreport{Coll90,
+ author = "Collins, George E. and Loos, Rudiger",
+ title = "Specification and Index of SAC-2 Algorithms",
+ institution = "Univ. of Tubingen",
+ type = "technical report",
+ year = "1990",
+ number = "WSI-90-4"
+}
+
+\end{chunk}
+
+\index{Comon, Hubert}
+\begin{chunk}{axiom.bib}
+@inproceedings{Como90,
+ author = "Comon, Hubert",
+ title = "Equational Formulas in Order-sorted Algebras",
+ booktitle = "ICALP 90. Automata, Languages and Programming",
+ year = "1990",
+ pages = "674-688",
+ abstract =
+ "We propose a set of transformation rules for first order formulas
+ whose atoms are either equations between terms or ``sort constraints''
+ $t \in s$ where s is a regular tree language (or a sort in the algebraic
+ specification community). This set of rules is proved to be correct,
+ terminating and complete. This shows in particular that the first
+ order theory of any rational tree language is decidable, extending the
+ results of [Mal71,CL89,Mah88]. We also show how to apply our results
+ to automatic inductive proofs in equational theories."
+}
+
+\end{chunk}
+
+\index{Comon, Hubert}
+\index{Lugiez, D.}
+\index{Schnoebelen, Ph.}
+\begin{chunk}{axiom.bib}
+@article{Como91,
+ author = "Comon, Hubert and Lugiez, D. and Schnoebelen, Ph.",
+ title = "A Rewrite-based Type Discipline for a Subset of Computer Algebra",
+ journal = "J. Symbolic Computation",
+ volume = "11",
+ number = "4",
+ year = "1991",
+ pages = "349-368",
+ abstract =
+ "This paper is concerned with the type structure of a system including
+ polymorphism, type properties and subtypes. This type system
+ originates from computer algebra but it is not intended to be the
+ solution of all type problems in this area.
+
+ Types (or sets of types) are denoted by terms in some order-sorted
+ algebra. We consider a rewrite relation in this algebra, which is
+ intended to express subtyping. The relations between the semantics and
+ the axiomatization are investigated. It is shown that the problem of
+ type inference is undecidable but a narrowing strategy for
+ semi-decision procedures is described and studied.",
+ paper = "Como91.pdf"
+}
+
+\end{chunk}
+
+\index{Coolsaet, Kris}
+\begin{chunk}{axiom.bib}
+@article{Cool92,
+ author = "Coolsaet, Kris",
+ title = "A Quick Introduction to the Programming Language MIKE",
+ journal = "Sigplan Notices",
+ volume = "27",
+ number = "6",
+ year = "1992",
+ pages = "37-48",
+ abstract =
+ "MIKE is a new programming language developed by the author as a base
+ language for the development of algebraic and symbolic algorithms. It
+ is a structured programming language with a MODULA-2-like syntax
+ supporting special features such as transparent dynamic memory
+ management, discriminated union types, operator overloading, data
+ abstraction and parametrized types. This text gives an overview of the
+ main features of the language as of version 2.0."
+}
+
+\end{chunk}
+
+\subsection{D} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Damas, Luis}
+\index{Milner, Robin}
+\begin{chunk}{axiom.bib}
+@inproceedings{Dama82,
+ author = "Damas, Luis and Milner, Robin",
+ title = "Principal Type-schemes for Functional Programs",
+ booktitle = "POPL 82",
+ pages = "207-212",
+ year = "1982",
+ isbn = "0-89798-065-6",
+ paper = "Dama82.pdf"
+}
+
+\end{chunk}
+
+\index{Davenport, James H.}
+\index{Jenks, Richard D.}
+\begin{chunk}{axiom.bib}
+@techreport{Dave80,
+ author = "Davenport, James H. and Jenks, Richard D.",
+ title = "MODLISP: A Preliminary Design",
+ institution = "IBM Research",
+ type = "Research Report",
+ year = "1980",
+ number = "RC 8073",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Davis, Martin D.}
+\index{Sigal, Ron}
+\index{Weyuker, Elaine J.}
+\begin{chunk}{axiom.bib}
+@book{Davi94,
+ author = "Davis, Martin D. and Sigal, Ron and Weyuker, Elaine J.",
+ title = "Computability, Complexity, and Languages: Fundamentals of
+ Theoretical Computer Science",
+ publisher = "Academic Press",
+ year = "1994",
+ isbn = "978-0122063824"
+}
+
+\end{chunk}
+
+\index{Dershowitz, Nachum}
+\index{Jouannaud, Jean-Pierre}
+\begin{chunk}{axiom.bib}
+@techreport{Ders89,
+ author = "Dershowitz, Nachum and Jouannaud, Jean-Pierre",
+ title = "Rewrite Systems",
+ year = "1989",
+ number = "478",
+ institution = "Laboratoire de Recherche en Informatique",
+ link = "\url{http://www.cs.tau.ac.il/~nachum/papers/survey-draft.pdf}",
+ paper = "Ders89.pdf"
+}
+
+\end{chunk}
+
+\subsection{E} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Ehrig, Hartmut}
+\index{Mahr, Bernd}
+\begin{chunk}{axiom.bib}
+@book{Ehri85,
+ author = "Ehrig, Hartmut and Mahr, Bernd",
+ title = "Fundamentals of Algebraic Specification 1: Equations and
+ Initial Semantics",
+ publisher = "Springer Verlag",
+ year = "1985",
+ isbn = "978-0387137186"
+}
+
+\end{chunk}
+
+\index{Elliott, Conal M.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Elli89,
+ author = "Elliott, Conal M.",
+ title = "Higher-order Unification with Dependent Function Types",
+ booktitle = "Rewriting Techniques and Applications",
+ year = "1989",
+ pages = "121-136",
+ abstract =
+ "Roughly fifteen years ago, Huet developed a complete semidecision
+ algorithm for unification in the simply typed $\lambda$-calculus
+ ($\lambda_\rightarrow$). In spite of the undecidability of this
+ problem, his algorithm is quite usable in practice. Since then, many
+ important applications have come about in such areas as theorem
+ proving, type inference, program transformation, and machine learning.
+
+ Another development is the discovery that by enriching
+ $\lambda_\rightarrow$ to include {\sl dependent function types},
+ the resulting calculus ($\lambda_\Pi$) forms the basis of a very
+ elegant and expressive Logical Framework, encompassing the syntax,
+ rules, and proofs for a wide class of logics.
+
+ This paper presents an algorithm in the spirit of Huet's, for
+ unification in $\lambda_\Pi$. This algorithm gives us the best
+ of both worlds: the automation previously possible in
+ $\lambda_\rightarrow$ and the greatly enriched expressive power of
+ $\lambda_\Pi$. It can be used to considerable advantage in many
+ of the current applications of Huet's algorithm, and has important
+ new applications as well. These include automated and semi-automated
+ theorem proving in encoded logics, and automatic type inference in a
+ variety of encoded languages."
+}
+
+\end{chunk}
+
+\subsection{F} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Farmer, William M.}
+\begin{chunk}{axiom.bib}
+@article{Farm90,
+ author = "Farmer, William M.",
+ title = "A Partial Functions Version of Church's Simple Theory of Types",
+ journal = "The Journal of Symbolic Logic",
+ volume = "55",
+ number = "3",
+ year = "1990",
+ pages = "1269-1291",
+ abstract =
+ "Church's simple theory of types is a system of higher-order logic in
+ which functions are assumed to be total. We present in this paper a
+ version of Church's system called PF in which functions may be
+ partial. The semantics of PF, which is based on Henkin's
+ general-models semantics, allows terms to be nondenoting but requires
+ formulas to always denote a standard truth value. We prove that PF is
+ complete with respect to its semantics. The reasoning mechanism in PF
+ for partial functions corresponds closely to mathematical practice,
+ and the formulation of PF adheres tightly to the framework of
+ Church's system.",
+ paper = "Farm90.pdf"
+}
+
+\end{chunk}
+
+\index{Faxen, Karl-Filip}
+\begin{chunk}{axiom.bib}
+@article{Faxe02,
+ author = "Faxen, Karl-Filip",
+ title = "A Static Semantics for Haskell",
+ year = "2002",
+ journal = "J. Functional Programming",
+ volume = "12",
+ number = "4-5",
+ pages = "295-357",
+ abstract =
+ "This paper gives a static semantics for Haskell 98, a non-strict
+ purely functional programming language. The semantics formally
+ specifies nearly all the details of the Haskell 98 type system, including the
+ resolution of overloading, kind inference (including defaulting) and
+ polymorphic recursion, the only major omission being a proper
+ treatment of ambiguous overloading and its resolution. Overloading is
+ translated into explicit dictionary passing, as in all current
+ implementations of Haskell. The target language of this translation is
+ a variant of the Girard-Reynolds polymorphic lambda calculus featuring
+ higher order polymorphism and explicit type abstraction and
+ application in the term language. Translated programs can thus still
+ be type checked, although the implicit version of this system is
+ impredicative. A surprising result of this formalization effort is that
+ the monomorphism restriction, when rendered in a system of inference
+ rules, compromises the principal type property.",
+ paper = "Faxe02.pdf"
+}
+
+\end{chunk}
+
\index{Fijalkow, Nathanael}
\begin{chunk}{axiom.bib}
@misc{Fija17,
@@ -566,6 +1465,244 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Foderaro, John K.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Fode83,
+ author = "Foderaro, John K.",
+ title = "The Design of a Language for Algebraic Computation Systems",
+ school = "U.C. Berkeley, EECS Dept.",
+ year = "1983",
+ link = "\url{http://digitalassets.lib.berkeley.edu/techreports/ucb/text/CSD-83-160.pdf}",
+ abstract =
+ "This thesis describes the design of a language to support a
+ mathematics-oriented symbolic algebra system. The language, which we
+ have named NEWSPEAK, permits the complex interrelations of
+ mathematical types, such as rings, fields and polynomials to be
+ described. Functions can be written over the most general type that
+ has the required operations and properties and then inherited by
+ subtypes. All function calls are generic, with most function
+ resolution done at compile time. Newspeak is type-safe, yet permits
+ runtime creation of types.",
+ paper = "Fode83.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Freyd, Peter J.}
+\index{Scedrov, Andre}
+\begin{chunk}{axiom.bib}
+@book{Frey90,
+ author = "Freyd, Peter J. and Scedrov, Andre",
+ title = "Categories, Allegories",
+ publisher = "Elsevier Science",
+ year = "1990",
+ isbn = "0-444-70368-3"
+}
+
+\end{chunk}
+
+\index{Fruehwirth, Thom}
+\index{Shapiro, Ehud}
+\index{Vardi, Moshe Y.}
+\index{Yardeni, Eyal}
+\begin{chunk}{axiom.bib}
+@inproceedings{Frue91,
+ author = "Fruehwirth, Thom and Shapiro, Ehud and Vardi, Moshe Y. and
+ Yardeni, Eyal",
+ title = "Logic programs as types for logic programs",
+ booktitle = "Proc. Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
+ publisher = "IEEE",
+ pages = "300-309",
+ year = "1991",
+ abstract =
+ "Type checking can be extremely useful to the program development process.
+ Of particular interest are descriptive type systems, which let the
+ programmer write programs without having to define or mention types.
+ We consider here optimistic type systems for logic programs. In such
+ systems types are conservative approximations to the success set of the
+ program predicates. We propose the use of logic programs to describe
+ types. We argue that this approach unifies the denotational and
+ operational approaches to descriptive type systems and is simpler
+ and more natural than previous approaches. We focus on the use of
+ unary-predicate programs to describe types. We identify a proper class
+ of unary-predicate programs and show that it is expressive enough to
+ express several notions of types. We use an analogy with 2-way automata
+ and a correspondence with alternating algorithms to obtain a complexity
+ characterization of type inference and type checking. This
+ characterization was facilitated by the use of logic programs to
+ represent types.",
+ paper = "Frue91.pdf"
+}
+
+\end{chunk}
+
+\index{Fuh, You-Chin}
+\index{Mishra, Prateek}
+\begin{chunk}{axiom.bib}
+@article{Fuhx89,
+ author = "Fuh, You-Chin and Mishra, Prateek",
+ title = "Polymorphic Subtype Inference -- Closing the Theory-Practice Gap",
+ journal = "Lecture Notes in Computer Science",
+ volume = "352",
+ year = "1989",
+ pages = "167-183",
+ paper = "Fuhx89.pdf"
+}
+
+\end{chunk}
+
+\index{Fuh, You-Chin}
+\index{Mishra, Prateek}
+\begin{chunk}{axiom.bib}
+@article{Fuhx90,
+ author = "Fuh, You-Chin and Mishra, Prateek",
+ title = "Type Inference with Subtypes",
+ journal = "Theoretical Computer Science",
+ volume = "73",
+ number = "2",
+ year = "1990",
+ pages = "155-175",
+ abstract =
+ "We extend polymorphic type inference with a very general notion of
+ subtype based on the concept of type transformation. This paper
+ describes the following results. We prove the existence of (i)
+ principal type property and (ii) syntactic completeness of the
+ type-checker, for type inference with subtypes. This result is
+ developed with only minimal assumptions on the underlying theory of
+ subtypes. As a consequence, it can be used as the basis for type
+ inference with a broad class of subtype theories. For a particular
+ “structural” theory of subtypes, those engendered by inclusions
+ between type constants only, we show that principal types are
+ compactly expressible. This suggests that type inference for the
+ structured theory of subtypes is feasible. We describe algorithms
+ necessary for such a system. The main algorithm we develop is called
+ MATCH, an extension to the classical unification algorithm. A proof of
+ correctness for MATCH is given.",
+ paper = "Fuhx90.pdf"
+}
+
+\end{chunk}
+
+\subsection{G} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{chunk}{axiom.bib}
+@misc{GAPx17,
+ author = "{The GAP Group}",
+ title = "GAP - Reference Manual",
+ year = "2017",
+ link = "\url{https://www.gap-system.org/Manuals/doc/ref/manual.pdf}"
+}
+
+\end{chunk}
+
+\index{Girard, Jean-Yves}
+\begin{chunk}{axiom.bib}
+@phdthesis{Gira72,
+ author = "Girard, Jean-Yves",
+ title = {Interpr\'etation fonctionnelle et \'elimination des coupures de
+ l'arithm\'etique d'ordre sup\'erieur},
+ school = {Universit\'e Paris VII},
+ year = "1972"
+}
+
+\end{chunk}
+
+\index{Girard, Jean-Yves}
+\index{Taylor, Paul}
+\index{Lafont, Yves}
+\begin{chunk}{axiom.bib}
+@book{Gira89,
+ author = "Girard, Jean-Yves and Taylor, Paul and Lafont, Yves",
+ title = "Proofs and Types",
+ publisher = "Cambridge University Press",
+ year = "1989"
+}
+
+\end{chunk}
+
+\index{Godel, Kurt}
+\begin{chunk}{axiom.bib}
+@article{Gode58,
+ author = "Godel, Kurt",
+ title = {\"Uber eine bisher noch nicht benutzte Erweiterung des Finiten
+ Standpunktes},
+ journal = "Dialectica",
+ volume = "12",
+ year = "1958",
+ pages = "280-287"
+}
+
+\end{chunk}
+
+\index{Goguen, Joseph}
+\index{Meseguer, Jose}
+\begin{chunk}{axiom.bib}
+@techreport{Gogu89,
+ author = "Goguen, Joseph and Meseguer, Jose",
+ title = "Order-sorted Algebra I : Equational Deduction for Multiple
+ Inheritance, Overloading, Exceptions, and Partial Operations",
+ type = "technical report",
+ institution = "SRI International",
+ year = "1989",
+ number = "SRIR 89-10"
+}
+
+\end{chunk}
+
+\index{Goguen, Joseph}
+\index{Meseguer, Jose}
+\begin{chunk}{axiom.bib}
+@article{Gogu92,
+ author = "Goguen, Joseph and Meseguer, Jose",
+ title = "Order-sorted Algebra I : Equational Deduction for Multiple
+ Inheritance, Overloading, Exceptions, and Partial Operations",
+ journal = "Theoretical Computer Science",
+ volume = "105",
+ number = "2",
+ year = "1992",
+ pages = "217-273",
+ abstract =
+ "This paper generalizes many-sorted algebra (MSA) to order-sorted
+ algebra (OSA) by allowing a partial ordering relation on the set of
+ sorts. This supports abstract data types with multiple inheritance (in
+ roughly the sense of object-oriented programming), several forms of
+ polymorphism and overloading, partial operations (as total on
+ equationally defined subsorts), exception handling, and an operational
+ semantics based on term rewriting. We give the basic algebraic
+ constructions for OSA, including quotient, image, product and term
+ algebra, and we prove their basic properties, including quotient,
+ homomorphism, and initiality theorems. The paper's major mathematical
+ results include a notion of OSA deduction, a completeness theorem for
+ it, and an OSA Birkhoff variety theorem. We also develop conditional
+ OSA, including initiality, completeness, and McKinsey-Malcev
+ quasivariety theorems, and we reduce OSA to (conditional) MSA, which
+ allows lifting many known MSA results to OSA. Retracts, which
+ intuitively are left inverses to subsort inclusions, provide
+ relatively inexpensive run-time error handling. We show that it is
+ safe to add retracts to any OSA signature, in the sense that it gives
+ rise to a conservative extension. A final section compares and
+ contrasts many different approaches to OSA. This paper also includes
+ several examples demonstrating the flexibility and applicability of
+ OSA, including some standard benchmarks like stack and list, as well
+ as a much more substantial example, the number hierarchy from the
+ naturals up to the quaternions.",
+ paper = "Gogu92.pdf"
+}
+
+\end{chunk}
+
+\index{Goldberg, Adele}
+\index{Robson, David}
+\begin{chunk}{axiom.bib}
+@book{Gold83,
+ author = "Goldberg, Adele and Robson, David",
+ title = "Smalltalk-80: The Language and Its Implementation",
+ publisher = "Addison-Wesley",
+ year = "1983"
+}
+
+\end{chunk}
+
\index{Gollan, H.}
\index{Grabmeier, Johannes}
\begin{chunk}{axiom.bib}
@@ -654,6 +1791,208 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Gries, David}
+\begin{chunk}{axiom.bib}
+@book{Grie78,
+ author = "Gries, David",
+ title = "Programming Methodology",
+ publisher = "Springer-Verlag",
+ year = "1978"
+}
+
+\end{chunk}
+
+\index{Graetzer, George}
+\begin{chunk}{axiom.bib}
+@book{Grae79,
+ author = "Graetzer, George",
+ title = "Universal Algebra",
+ publisher = "Springer",
+ isbn = "978-0-387-77486-2",
+ year = "1979",
+ paper = "Grae79.pdf"
+}
+
+\end{chunk}
+
+\subsection{H} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Harper, Robert}
+\index{Honsell, Furio}
+\index{Plotkin, Gordon}
+\begin{chunk}{axiom.bib}
+@article{Harp93,
+ author = "Harper, Robert and Honsell, Furio and Plotkin, Gordon",
+ title = "A Framework for Defining Logics",
+ journal = "J. ACM",
+ volume = "40",
+ number = "1",
+ year = "1993",
+ pages = "143-184",
+ abstract =
+ "The Edinburgh Logical Framework (LF) provides a means to define (or
+ present) logics. It is based on a general treatment of syntax, rules,
+ and proofs by means of a typed $\lambda$-calculus with dependent
+ types. Syntax is treated in a style similar to, but more general than,
+ Martin-Lof's system of arities. The treatment of rules and proofs
+ focuses on his notion of a judgment. Logics are represented in LF via
+ a new principle, the judgments as types principle, whereby each
+ judgment is identified with the type of its proofs. This allows for a
+ smooth treatment of discharge and variable occurence conditions and
+ leads to a uniform treatment of rules and proofs whereby rules are
+ viewed as proofs of higher-order judgments and proof checking is
+ reduced to type checking. The practical benefit of our treatment of
+ formal systems is that logic-independent tools, such as proof editors
+ and proof checkers, can be constructed.",
+ paper = "Harp93.pdf"
+}
+
+\end{chunk}
+
+\index{Hindley, R.}
+\begin{chunk}{axiom.bib}
+@article{Hind69,
+ author = "Hindley, R.",
+ title = "The Principal Type-Scheme of an Object in Combinatory Logic",
+ journal = "Trans. AMS",
+ volume = "146",
+ year = "1969",
+ pages = "29-60",
+ paper = "Hind69.pdf"
+}
+
+\end{chunk}
+
+\index{Hodges, Wilfrid}
+\begin{chunk}{axiom.bib}
+@article{Hodg95,
+ author = "Hodges, Wilfrid",
+ title = "The Meaning of Specifications I: Domains and Initial Models",
+ journal = "Theoretical Computer Science",
+ volume = "192",
+ number = "1",
+ year = "1995",
+ pages = "67-89",
+ abstract =
+ "This is the first of a short series of papers intended to provide one
+ common semantics for several different types of specification
+ language, in order to allow comparison and translations. The
+ underlying idea is that a specification describes the behaviour of a
+ system, depending on parameters. We can represent this behaviour as a
+ functor which acts on structures representing the parameters, and
+ which yields a structure representing the behaviour. We characterise
+ in domain-theoretic terms the class of functors which could in
+ principle be specified and implemented; briefly, they are the functors
+ which preserve directed colimits and whose restriction to finitely
+ presented structures is recursively enumerable. We also characterise
+ those functors which allow specification by initial semantics in
+ universal Horn classes with finite vocabulary; these functors consist
+ of a free functor (i.e. left adjoint of a forgetful functor) followed
+ by a forgetful functor. The main result is that these two classes of
+ functor are the same up to natural isomorphism.",
+ paper = "Hodg95.pdf"
+}
+
+\end{chunk}
+
+\index{Howe, Douglas J.}
+\begin{chunk}{axiom.bib}
+@techreport{Howe87,
+ author = "Howe, Douglas J.",
+ title = "The Computational Behaviour of Girard's Paradox",
+ institution = "Cornell University",
+ year = "1987",
+ link = "\url{https://ecommons.cornell.edu/handle/1813/6660}",
+ number = "TR 87-820",
+ abstract =
+ "In their paper ``Type'' Is Not a Type, Meyer and Reinhold argued that
+ serious pathologies can result when a type of all types is added to a
+ programing language with dependent types. Central to their argument is
+ the claim that by following the proof of Girard's paradox it is
+ possible to construct in their calculus $\lambda^{\tau \tau}$ a term
+ having a fixed-point property. Because of the tremendous amount of
+ formal detail involved, they were unable to establish this claim. We
+ have made use of the Nuprl proof development system in constructing a
+ formal proof of Girard's paradox and analysing the resulting term. We
+ can show that the term does not have the desired fixed-point property,
+ but does have a weaker form of it that is sufficient to establish some
+ of the results of Meyer and Reinhold. We believe that the method used
+ here is in itself of some interest, representing a new kind of
+ application of a computer to a problem in symbolic logic."
+}
+
+\end{chunk}
+
+\index{Hudak, Paul}
+\index{Jones, Simon Peyton}
+\index{Wadler, Philip}
+\index{Boutel, Brian}
+\index{Fairbairn, Jon}
+\index{Fasel, Joseph}
+\index{Guzman, Maria M.}
+\index{Hammond, Kevin}
+\index{Hughes, John}
+\index{Johnsson, Thomas}
+\index{Kieburtz, Dick}
+\index{Nikhil, Rishiyur}
+\index{Partain, Will}
+\index{Peterson, John}
+\begin{chunk}{axiom.bib}
+@article{Huda92,
+ author = "Hudak, Paul and Jones, Simon Peyton and Wadler, Philip and
+ Boutel, Brian and Fairbairn, Jon and Fasel, Joseph and
+ Guzman, Maria M. and Hammond, Kevin and Hughes, John and
+ Johnsson, Thomas and Kieburtz, Dick and Nikhil, Rishiyur and
+ Partain, Will and Peterson, John",
+ title = "Report on the Programming Language Haskell, a non-strict
+ functional language version 1.2",
+ journal = "ACM SIGPLAN Notices",
+ volume = "27",
+ number = "5",
+ year = "1992",
+ pages = "1-164",
+ abstract =
+ "Some half dozen persons have written technically on combinatory
+ logic, and most of these, including ourselves, have published
+ something erroneous. Since some of our fellow sinners are among the
+ most careful and competent logicians on the contemporary scene, we
+ regard this as evidence that the subject is refractory. Thus fullness
+ of exposition is necessary for accuracy; and excessive condensation
+ would be false economy here, even more than it is ordinarily."
+}
+
+\end{chunk}
+
+\index{Hudak, Paul}
+\index{Peterson, John}
+\index{Fasel, Joseph H.}
+\begin{chunk}{axiom.bib}
+@misc{Huda99,
+ author = "Hudak, Paul and Peterson, John and Fasel, Joseph H.",
+ title = "A Gentle Introduction to Haskell 98",
+ year = "1999",
+ link = "\url{https://www.haskell.org/tutorial/haskell-98-tutorial.pdf}",
+ paper = "Huda99.pdf"
+}
+
+\end{chunk}
+
+\index{Huet, Gerard}
+\index{Plotkin, G.}
+\begin{chunk}{axiom.bib}
+@book{Huet91,
+ author = "Huet, Gerard and Plotkin, G.",
+ title = "Logical Frameworks",
+ publisher = "Cambridge University",
+ year = "1991"
+}
+
+\end{chunk}
+
+\subsection{I} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{J} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{James, G.}
\index{Kerber, A.}
\begin{chunk}{axiom.bib}
@@ -669,6 +2008,943 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Jones, Simon Peyton}
+\begin{chunk}{axiom.bib}
+@book{Jone87,
+ author = "Jones, Simon Peyton",
+ title = "The Implementation of Functional Programming Languages",
+ publisher = "Simon and Schuster",
+ year = "1987",
+ isbn = "0-13-453333-X",
+ paper = "Jone87.pdf"
+}
+
+\end{chunk}
+
+\index{Jouannaud, Jean-Pierre}
+\index{Kirchner, Claude}
+\begin{chunk}{axiom.bib}
+@book{Joua90,
+ author = "Jouannaud, Jean-Pierre and Kirchner, Claude",
+ title = "Solving Equations in Abstract Algebras: A Rule-based Survey of
+ Unification",
+ year = "1990",
+ publisher = "Universite do Paris-Sud"
+}
+
+\end{chunk}
+
+\index{Jouannaud, Jean Pierre}
+\index{Okada, Mitsuhiro}
+\begin{chunk}{axiom.bib}
+@inproceedings{Joua91,
+ author = "Jouannaud, Jean Pierre and Okada, Mitsuhiro",
+ title = "A Computation Model for Executable Higher-order Algebraic
+ Specification Languages",
+ booktitle = "Symposium on Logic in Computer Science",
+ pages = "350-361",
+ isbn = "081862230X",
+ year = "1991",
+ abstract =
+ "The combination of (polymorphically) typed lambda-calculi with
+ first-order as well as higher-order rewrite rules is considered. The
+ need of such a combination for exploiting the benefits of
+ algebraically defined data types within functional programming is
+ demonstrated. A general modularity result, which allows as particular
+ cases primitive recursive functionals of higher types, transfinite
+ recursion of higher types, and inheritance for all types, is
+ proved. The class of languages considered is first defined, and it is
+ shown how to reduce the Church-Rosser and termination (also called
+ strong normalization) properties of an algebraic functional language
+ to a so-called principal lemma whose proof depends on the property to
+ be proved and on the language considered. The proof of the principal
+ lemma is then sketched for various languages. The results allows
+ higher order rules defining the higher-order constants by a certain
+ generalization of primitive recursion. A prototype of such primitive
+ recursive definitions is provided by the definition of the map
+ function for lists.",
+ paper = "Joua91.pdf"
+}
+
+\end{chunk}
+
+\subsection{K} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Kaes, Stefan}
+\begin{chunk}{axiom.bib}
+@article{Kaes92,
+ author = "Kaes, Stefan",
+ title = "Type Inference in the Presence of Overloading, Subtyping, and
+ Recursive Types",
+ journal = "LISP Pointers",
+ volume = "V",
+ number = "1",
+ pages = "193-204",
+ year = "1992",
+ paper = "Kaes92.pdf"
+}
+
+\end{chunk}
+
+\index{Kaltofen, E.}
+\begin{chunk}{axiom.bib}
+@incollection{Kalt83a,
+ author = "Kaltofen, E.",
+ title = "Factorization of Polynomials",
+ booktitle = "Computer Algebra - Symbolic and Algebraic Computation",
+ pages = "95-113",
+ year = "1983",
+ abstract =
+ "Algorithms for factoring polynomials in one or more variables over
+ various coefficient domains are discussed. Special emphasis is given
+ to finite fields, the integers, or algebraic extensions of the
+ rationals, and to multivariate polynomials with integral coefficients.
+ In particular, various squarefree decomposition algorithms and Hensel
+ lifting techniques are analyzed. An attempt is made to establish a
+ complete historic trace for today’s methods. The exponential worst
+ case complexity nature of these algorithms receives attention.",
+ paper = "Kalt83a.pdf"
+}
+
+\end{chunk}
+
+\index{Kanellakis, Paris C.}
+\index{Mairson, Harry G.}
+\index{Mitchell, John C.}
+\begin{chunk}{axiom.bib}
+@techreport{Kane90,
+ author = "Kanellakis, Paris C. and Mairson, Harry G. and Mitchell, John C.",
+ title = "Unification and ML Type Reconstruction",
+ link = "\url{ftp://ftp.cs.brown.edu/pub/techreports/90/cs90-26.pdf}",
+ institution = "Brown University",
+ year = "1990",
+ number = "CS-90-26",
+ abstract =
+ "We study the complexity of type reconstruction for a core fragment of
+ ML with lambda abstraction, function application, and the polymorphic
+ {\bf let} declaration. We derive exponential upper and lower bounds on
+ recognizing the typable core ML expressions. Our primary technical
+ tool is unification of succinctly represented type expressions. After
+ observing that core ML expressions, of size $n$, can be typed in
+ DTIME($2^n$), we exhibit two different families of programs whose
+ principal types grow exponentially. We show how to exploit the
+ expressiveness of the {\bf let}-polymorphism in these constructions to
+ derive lower bounds on deciding typability: one leads naturally to
+ NP-hardness and the other to DTIME($2^{n^k}$)-hardness for each integer
+ $k\ge 1$. Our generic simulation of any exponential time Turing
+ Machine by ML type reconstruction may be viewed as a nonstandard way
+ of computing with types. Our worst-case lower bounds stand in contrast
+ to practical experience, which suggests that commonly used algorithms
+ for type reconstruction do not slow compilation substantially.",
+ paper = "Kane90.pdf"
+}
+
+\end{chunk}
+
+\index{Kfoury, A.J.}
+\index{Tiuryn, J.}
+\index{Urzyczyn, P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Kfou88,
+ author = "Kfoury, A.J. and Tiuryn, J. and Utzyczyn, P.",
+ title = "A Proper Extension of ML with an Effective Type-Assignment",
+ booktitle = "POPL 88",
+ year = "1988",
+ pages = "58-69",
+ abstract =
+ "We extend the functional language ML by allowing the recursive calls
+ to a function F on the right-hand side of its definition to be at
+ different types, all generic instances of the (derived) type of F on
+ the left-hand side of its definition. The original definition of ML
+ does not allow this feature. This extension does not produce new types
+ beyond the usual universal polymorphic types of ML and satisfies the
+ properties already enjoyed by ML: the principal-type property and the
+ effective type-assignment property.",
+ paper = "Kfou88.pdf"
+}
+
+\end{chunk}
+
+\index{Kfoury, A. J.}
+\index{Tiuryn, J.}
+\index{Urzyczyn, P.}
+\begin{chunk}{axiom.bib}
+@article{Kfou93,
+ author = "Kfoury, A. J. and Tiuryn, J. and Urzyczyn, P.",
+ title = "The Undecidability of the Semi-unification Problem",
+ journal = "Information and Computation",
+ volume = "102",
+ number = "1",
+ year = "1993",
+ pages = "83-101",
+ abstract =
+ "The Semi-Unification Problem (SUP) is a natural generalization of
+ both first-order unification and matching. The problem arises in
+ various branches of computer science and logic. Although several
+ special cases of SUP are known to be decidable, the problem in general
+ has been open for several years. We show that SUP in general is
+ undecidable, by reducing what we call the ``boundedness problem'' of
+ Turing machines to SUP. The undecidability of this boundedness problem
+ is established by a technique developed in the mid-1960s to prove
+ related results about Turing machines.",
+ paper = "Kfou93.pdf"
+}
+
+\end{chunk}
+
+\index{Kifer, Michael}
+\index{Wu, James}
+\begin{chunk}{axiom.bib}
+@inproceedings{Kife91,
+ author = "Kifer, Michael and Wu, James",
+ title = "A First-order Theory of Types and Polymorphism in Logic
+ Programming",
+ booktitle = "Proc Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
+ year = "1991",
+ pages = "310-321",
+ abstract =
+ "A logic called typed predicate calculus (TPC) that gives declarative
+ meaning to logic programs with type declarations and type inference is
+ introduced. The proper interaction between parametric and inclusion
+ varieties of polymorphism is achieved through a construct called type
+ dependency, which is analogous to implication types but yields more
+ natural and succinct specifications. Unlike other proposals where
+ typing has extra-logical status, in TPC the notion of type-correctness
+ has precise model-theoretic meaning that is independent of any
+ specific type-checking or type-inference procedure. Moreover, many
+ different approaches to typing that were proposed in the past can be
+ studied and compared within the framework of TPC. Another novel
+ feature of TPC is its reflexivity with respect to type declarations;
+ in TPC, these declarations can be queried the same way as any other
+ data. Type reflexivity is useful for browsing knowledge bases and,
+ potentially, for debugging logic programs.",
+ paper = "Kife91.pdf"
+}
+
+\end{chunk}
+
+\index{Kirkerud, Bjorn}
+\begin{chunk}{axiom.bib}
+@book{Kirk89,
+ author = "Kirkerud, Bjorn",
+ title = "Object-Oriented Programming With Simula",
+ year = "1989",
+ series = "International Computer Science Series",
+ publisher = "Addison-Wesley"
+}
+
+\end{chunk}
+
+\index{Klop, J.W.}
+\begin{chunk}{axiom.bib}
+@techreport{Klop90,
+ author = "Klop, J. W.",
+ title = "Term Rewriting Systems",
+ institution = "Stichting Methematisch Centrum",
+ year = "1990",
+ number = "CS-R9073",
+ abstract =
+ "Term Rewriting Systems play an important role in various areas, such
+ as abstract data type specifications, implementations of functional
+ programming languages and automated deduction. In this chapter we
+ introduce several of the basic concepts and facts for
+ TRSs. Specifically, we discuss Abstract Reduction Systems; general
+ Term Rewriting Systems including an account of Knuth-Bendix completion
+ and (E- )unification; orthogonal TRSs and reduction strategies;
+ strongly sequential orthogonal TRS. Finally some extended rewrite
+ formats are introduced: Conditional TRSs and Combinatory Reduction
+ Systems. The emphasis throughout the paper is on providing information
+ of a syntactic nature."
+}
+
+\end{chunk}
+
+\index{Kowalsky, Hans Joachim}
+\begin{chunk}{axiom.bib}
+@book{Kowa63,
+ author = "Kowalsky, Hans Joachim",
+ title = "Linear Algebra",
+ year = "1963",
+ publisher = "Walter de Gruyter",
+ comment = "(German)"
+}
+
+\end{chunk}
+
+\subsection{L} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Lang, Serge}
+\begin{chunk}{axiom.bib}
+@book{Lang05,
+ author = "Lang, Serge",
+ title = "Algebra",
+ publisher = "Springer",
+ year = "2005",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-0387953854"
+}
+
+\end{chunk}
+
+\index{Lauer, M.}
+\begin{chunk}{axiom.bib}
+@InCollection{Laue82,
+ author = "Lauer, M.",
+ title = "Computing by Homomorphic Images",
+ booktitle = "Computer Algebra: Symbolic and Algebraic Computation",
+ pages = "139-168",
+ year = "1982",
+ publisher = "Springer",
+ isbn = "978-3-211-81684-4",
+ abstract =
+ "After explaining the general technique of Computing by homomorphic
+ images, the Chinese remainder algorithm and the Hensel lifting
+ construction are treated extensively. Chinese remaindering is first
+ presented in an abstract setting. Then the specialization to Euclidean
+ domains, in particular $\mathbb{Z}$, $\mathbb{K}[y]$, and
+ $\mathbb{Z}[y_1,\ldots,y_n]$ is considered. For both techniques,
+ Chinese remaindering as well as the lifting algorithms, a complete
+ computational example is presented and the most frequent application
+ is discussed."
+}
+
+\end{chunk}
+
+\index{Leiss, Hans}
+\begin{chunk}{axiom.bib}
+@inproceedings{Leis87,
+ author = "Leiss, Hans",
+ title = "On Type Inference for Object-Oriented Programming Languages",
+ booktitle = "Int. Workshop on Computer Science Logic",
+ year = "1987",
+ pages = "151-172",
+ abstract =
+ "We present a type inference calculus for object-oriented programming
+ languages. Explicit polymorphic types, subtypes and multiple
+ inheritance are allowed. Class types are obtained by selection from
+ record types, but not considered subtypes of record types. The subtype
+ relation for class types reflects the (mathematically clean)
+ properties of subclass relations in object-oriented programming to a
+ better extend than previous systems did.
+
+ Based on Mitchells models for type inference, a semantics for types is
+ given where types are sets of values in a model of type-free lambda
+ calculus. For the sublanguage without type quantifiers and subtype
+ relation, automatic type inference is possible by extending Milners
+ algorithm W to deal with a polymorphic fixed-point rule."
+}
+
+\end{chunk}
+
+\index{Limongelli, C.}
+\index{Temperini, M.}
+\begin{chunk}{axiom.bib}
+@article{Limo92,
+ author = "Limongelli, C. and Temperini, M.",
+ title = "Abstract Specification of Structures and Methods in Symbolic
+ Mathematical Computation",
+ journal = "Theoretical Computer Science",
+ volume = "104",
+ year = "1992",
+ pages = "89-107",
+ abstract =
+ "This paper describes a methodology based on the object-oriented
+ programming paradigm, to support the design and implementation of a
+ symbolic computation system. The requirements of the system are
+ related to the specification and treatment of mathematical
+ structures. This treatment is considered from both the numerical and
+ the symbolic points of view. The resulting programming system should
+ be able to support the formal definition of mathematical data
+ structures and methods at their highest level of abstraction, to
+ perform computations on instances created from such definitions, and
+ to handle abstract data structures through the manipulation of their
+ logical properties. Particular consideration is given to the
+ correctness aspects. Some examples of convenient application of the
+ proposed design methodology are presented.",
+ paper = "Limo92.pdf"
+}
+
+\end{chunk}
+
+\index{Lincoln, Patrick}
+\index{Mitchell, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Linc92,
+ author = "Lincoln, Patrick and Mitchell, John C.",
+ title = "Algorithmic Aspects of Type Inference with Subtypes",
+ booktitle = "POPL 92",
+ pages = "293-304",
+ year = "1992",
+ abstract =
+ "We study the complexity of type inference for programming languages
+ with subtypes. There are three language variations that effect the
+ problem: (i) basic functions may have polymorphic or more limited
+ types, (ii) the subtype hierarchy may be fixed or vary as a result of
+ subtype declarations within a program, and (iii) the subtype hierarchy
+ may be an arbitrary partial order or may have a more restricted form,
+ such as a tree or lattice. The naive algorithm for inferring a most
+ general polymorphic type, under variable subtype hypotheses, requires
+ deterministic exponential time. If we fix the subtype ordering, this
+ upper bound grows to nondeterministic exponential time. We show that
+ it is NP-hard to decide whether a lambda term has a type with respect
+ to a fixed subtype hierarchy (involving only atomic type names). This
+ lower bound applies to monomorphic or polymorphic languages. We give
+ PSPACE upper bounds for deciding polymorphic typability if the subtype
+ hierarchy has a lattice structure or the subtype hierarchy varies
+ arbitrarily. We also give a polynomial time algorithm for the limited
+ case where there are of no function constants and the type hierarchy
+ is either variable or any fixed lattice.",
+ paper = "Linc92.pdf"
+}
+
+\end{chunk}
+
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@article{Loos72,
+ author = "Loos, Rudiger",
+ title = "Algebraic Algorithm Descriptions as Programs",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "23",
+ year = "1972",
+ pages = "16-24",
+ abstract =
+ "We propose methods for writing algebraic programs in an algebraic
+ notation. We discuss the advantages of this approach and a specific
+ example",
+ paper = "Loos72.pdf"
+}
+
+\end{chunk}
+
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@article{Loos76,
+ author = "Loos, Rudiger",
+ title = "The Algorithm Description Language (ALDES) (report)",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "10",
+ number = "1",
+ year = "1976",
+ pages = "14-38",
+ abstract =
+ "ALDES is a formalization of the method to describe algorithms used in
+ Knuth's books. The largest documentation of algebraic algorithms,
+ Collins' SAC system for Computer Algebra, is written in this
+ language. In contrast to PASCAL it provides automatic storage
+ deallocation. Compared to LISP equal emphasis was placed on efficiency
+ of arithmetic, list processing, and array handling. To allow the
+ programmer full control of efficiency all mechanisms of the system are
+ accessible to him. Currently ALDES is available as a preprocessor to
+ ANSI Fortran, using no additional primitives.",
+ paper = "Loos76.pdf"
+}
+
+\end{chunk}
+
+\index{Loos, Ruediger G. K.}
+\begin{chunk}{axiom.bib}
+@article{Loos74,
+ author = "Loos, Ruediger G. K.",
+ title = "Toward a Formal Implementation of Computer Algebra",
+ journal = "SIGSAM",
+ volume = "8",
+ number = "3",
+ pages = "9-16",
+ year = "1974",
+ abstract =
+ "We consider in this paper the task of synthesizing an algebraic
+ system. Today the task is significantly simpler than in the pioneer
+ days of symbol manipulation, mainly because of the work done by the
+ pioneers in our area, but also because of the progress in other areas
+ of Computer Science. There is now a considerable collection of
+ algebraic algorithms at hand and a much better understanding of data
+ structures and programming constructs than only a few years ago.",
+ paper = "Loos74.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Loos, Rudiger}
+\index{Collins, George E.}
+\begin{chunk}{axiom.bib}
+@book{Loos92,
+ author = "Loos, Rudiger and Collins, George E.",
+ title = "Revised Report on the ALgorithm Language ALDES",
+ publisher = "Institut fur Informatik",
+ year = "1992"
+}
+
+\end{chunk}
+
+\subsection{M} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{MacLane, Saunders}
+\begin{chunk}{axiom.bib}
+@book{Macl91,
+ author = "MacLane, Saunders",
+ title = "Categories for the Working Mathematician",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "0-387-98403-8",
+ link = "\url{http://www.maths.ed.ac.uk/~aar/papers/maclanecat.pdf}",
+ paper = "Macl91.pdf"
+}
+
+\end{chunk}
+
+\index{MacLane, Saunders}
+\begin{chunk}{axiom.bib}
+@book{Macl92,
+ author = "MacLane, Saunders",
+ title = "Sheaves in Geometry and Logic: A First Introduction to Topos
+ Theory",
+ year = "1992",
+ isbn = "978-0-387-97710-2",
+ publisher = "Springer"
+}
+
+\end{chunk}
+
+\index{Manes, Ernest G.}
+\begin{chunk}{axiom.bib}
+@book{Mane76,
+ author = "Manes, Ernest G.",
+ title = "Algebraic Theories",
+ publisher = "Springer",
+ year = "1976",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-1-9860-1"
+}
+
+\end{chunk}
+
+\index{Marcus, Daniel A.}
+\begin{chunk}{axiom.bib}
+@book{Marc77,
+ author = "Marcus, Daniel A.",
+ title = "Number Fields",
+ publisher = "Springer",
+ year = "1977",
+ isbn = "978-0387902791"
+}
+
+\end{chunk}
+
+\index{Meyer, Albert R.}
+\index{Reinhold, Mark B.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Meye86,
+ author = "Meyer, Albert R. and Reinhold, Mark B.",
+ title = "Type is not a type",
+ booktitle = "POPL 86",
+ pages = "287-295",
+ abstract =
+ "A function has a dependent type when the type of its result
+ depends upon the value of its argument. Dependent types originated in
+ the type theory of intuitionistic mathematics and have reappeared
+ independently in programming languages such as CLU, Pebble, and
+ Russell. Some of these languages make the assumption that there exists
+ a type-of-all-types which is its own type as well as the type
+ of all other types. Girard proved that this approach is inconsistent
+ from the perspective of intuitionistic logic. We apply Girard's
+ techniques to establish that the type-of-all-types assumption creates
+ serious pathologies from a programming perspective: a system using
+ this assumption is inherently not normalizing, term equality is
+ undecidable, and the resulting theory fails to be a conservative
+ extension of the theory of the underlying base types. The failure of
+ conservative extension means that classical reasoning about programs
+ in such a system is not sound.",
+}
+
+\end{chunk}
+
+\index{Meyer, Bertrand}
+\begin{chunk}{axiom.bib}
+@book{Meye88,
+ author = "Meyer, Bertrand",
+ title = "Object-Oriented Software Construction",
+ year = "1988",
+ publisher = "Prentice Hall",
+ link = "\url{https://sophia.javeriana.edu.co/~cbustaca/docencia/POO-2016-01/documentos/Object%20Oriented%20Software%20Construction-Meyer.pdf}",
+ paper = "Meye88.pdf"
+}
+
+\end{chunk}
+
+\index{Milner, R.}
+\index{Tofte, M.}
+\index{Harper, R.}
+\begin{chunk}{axiom.bib}
+@book{Miln90,
+ author = "Milner, Robin and Torte, Mads and Harper, Robert",
+ title = "The Definition of Standard ML",
+ publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
+ link = "\url{http://sml-family.org/sml90-defn.pdf}",
+ year = "1990",
+ paper = "Miln90.pdf"
+}
+
+\end{chunk}
+
+\index{Milner, R.}
+\index{Tofte, M.}
+\begin{chunk}{axiom.bib}
+@book{Miln91,
+ author = "Milner, Robin and Torte, Mads",
+ title = "Commentary on Standard ML",
+ publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
+ link = "\url{https://pdfs.semanticscholar.org/d199/16cbbda01c06b6eafa0756416e8b6f15ff44.pdf}",
+ year = "1991",
+ paper = "Miln91.pdf"
+}
+
+\end{chunk}
+
+\index{Mitchell, John C.}
+\begin{chunk}{axiom.bib}
+@article{Mitc91,
+ author = "Mitchell, John C.",
+ title = "TYpe Inference with Simple Subtypes",
+ journal = "J. of Functional Programming",
+ volume = "1",
+ number = "3",
+ year = "1991",
+ pages = "245-285",
+ abstract =
+ "Subtyping appears in a variety of programming languages, in the form
+ of the ‘automatic coercion’ of integers to reals, Pascal subranges,
+ and subtypes arising from class hierarchies in languages with
+ inheritance. A general framework based on untyped lambda calculus
+ provides a simple semantic model of subtyping and is used to
+ demonstrate that an extension of Curry's type inference rules are
+ semantically complete. An algorithm G for computing the most general
+ typing associated with any given expression, and a restricted,
+ optimized algorithm GA using only atomic subtyping hypotheses are
+ developed. Both algorithms may be extended to insert type conversion
+ functions at compile time or allow polymorphic function declarations
+ as in ML.",
+ paper = "Mitc91.pdf"
+}
+
+\end{chunk}
+
+\index{Mitchell, John C.}
+\begin{chunk}{axiom.bib}
+@InCollection{Mitc91a,
+ author = "Mitchell, John C.",
+ title = "Type Systems for Programming Languages",
+ booktitle = "Handbook of Theoretical Computer Science (Vol B.)",
+ pages = "365-458",
+ year = "1991",
+ publisher = "MIT Press",
+ isbn = "0-444-88074-7"
+}
+
+\end{chunk}
+
+\index{Monk, J. Donald}
+\begin{chunk}{axiom.bib}
+@book{Monk76,
+ author = "Monk, J. Donald",
+ title = "Mathematical Logic",
+ publisher = "Springer",
+ year = "1976",
+ isbn = "978-1-4684-9452-5"
+}
+
+\end{chunk}
+
+\subsection{N} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Nipkow, Tobias}
+\index{Snelting, Gregor}
+\begin{chunk}{axiom.bib}
+@inproceedings{Nipk91,
+ author = "Nipkow, Tobias and Snelting, Gregor",
+ title = "Type Classes and Overloading Resolution via Order-Sorted
+ Unification",
+ booktitle = "Proc 5th ACM Conf. Functional Prog. Lang. and Comp. Arch.",
+ year = "1991",
+ publisher = "Springer",
+ journal = "LNCS",
+ volume = "523",
+ pages = "1-14",
+ abstract =
+ "We present a type inference algorithm for a Haskell-like language
+ based on order-sorted unification. The language features polymorphism,
+ overloading, type classes and multiple inheritance. Class and instance
+ declarations give rise to an order-sorted algebra of types. Type
+ inference essentially reduces to the Hindley/Milner algorithm where
+ unification takes place in this order-sorted algebra of types. The
+ theory of order-sorted unification provides simple sufficient
+ conditions which ensure the existence of principal types. The
+ semantics of the language is given by a translation into ordinary
+ lambda-calculus. We prove the correctness of our type inference
+ algorithm with respect to this semantics.",
+ paper = "Nipk91.pdf"
+}
+
+\end{chunk}
+
+\subsection{O} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Odifreddi, Piergiorgio}
+\begin{chunk}{axiom.bib}
+@book{Odif92,
+ author = "Odifreddi, Piergiorgio",
+ title = "Classical Recursion Theory: The Theory of Functions and Sets of
+ Natural Numbers",
+ publisher = "Elsevier",
+ year = "1992"
+}
+
+\end{chunk}
+
+\subsection{P} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Paterson, M. S.}
+\begin{chunk}{axiom.bib}
+@article{Pate78,
+ author = "Paterson, M. S.",
+ title = "Linear Unification",
+ journal = "J. Computer and System Sciences",
+ volume = "16",
+ number = "2",
+ year = "1978",
+ pages = "158-167",
+ abstract =
+ "A unification algorithm is described which tests a set of expressions
+ for unifiability and which requires time and space which are only linear
+ in the size of the input",
+ paper = "Pate78.pdf"
+}
+
+\end{chunk}
+
+\index{Pfenning, Frank}
+\begin{chunk}{axiom.bib}
+@inproceedings{Pfen91,
+ author = "Pfenning, Frank",
+ title = "Logic Programming in the LF Logical Framework",
+ booktitle = "Proc. First Workshop on Logical Frameworks",
+ year = "1991",
+ paper = "Pfen91.pdf"
+}
+
+\end{chunk}
+
+\index{Pfenning, Frank}
+\begin{chunk}{axiom.bib}
+@inproceedings{Pfen91a,
+ author = "Pfenning, Frank",
+ title = "Unification and Anti-Unification in the Calculus of Constructions",
+ booktitle = "Logic in Computer Science 91",
+ year = "1991",
+ pages = "74-85",
+ abstract =
+ "We present algorithms for unification and anti- unification in the
+ Calculus of Constructions, where occurrences of free variables (the
+ variables subject to instantiation) are restricted to higher-order
+ patterns, a notion investigated for the simply-typed $\lambda$-calculus
+ by Miller. Most general unifiers and least common anti-instances are
+ shown to exist and are unique up to a simple equivalence. The
+ unification algorithm is used for logic program execution and type and
+ term reconstruction in the current implementation of Elf and has
+ shown itself to be practical. The main application of the
+ anti-unification algorithm we have in mind is that of proof
+ generalization.",
+ paper = "Pfen91a.pdf"
+}
+
+\end{chunk}
+
+\index{Pfenning, Frank}
+\begin{chunk}{axiom.bib}
+@book{Pfen92,
+ author = "Pfenning, Frank",
+ title = "Types in Logic Programming",
+ isbn = "9780262161312",
+ publisher = "MIT Press",
+ year = "1992",
+ abstract =
+ "Types play an increasingly important role in logic programming, in
+ language design as well as language implementation. We present
+ various views of types, their connection, and their role within the
+ logic programming paradigm.
+
+ Among the basic views of types we find
+ the so-called descriptive systems, where types describe properties of
+ untyped logic programs, and prescriptive systems, where types are
+ essential to the meaning of programs. A typical ap- plication of
+ descriptive types is the approximation of the meaning of a logic
+ program as a subset of the Herbrand universe on which a predicate
+ might be true. The value of prescriptive systems lies primarily in
+ program devel- opment, for example, through early detection of errors
+ in programs which manifest themselves as type inconsistencies, or as
+ added documentation for the intended and legal use of predicates.
+
+ Central topics within these views are the problems of type inference
+ and type reconstruction, respectively. Type inference is a form of
+ analysis of untyped logic programs, while type reconstruction attempts
+ to fill in some omitted type information in typed logic programs and
+ generalizes the prob- lem of type checking. Even though analogous
+ problems arise in functional programming, algorithms addressing these
+ problems are quite different in our setting.
+
+ Among the specific forms of types we discuss are simple types,
+ recursive types, polymorphic types, and dependent types. We also
+ briefly touch upon subtypes and inheritance, and the role of types
+ in module systems for logic programming languages."
+}
+
+\end{chunk}
+
+\index{Pierce, Benjamin C.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Pier91,
+ author = "Pierce, Benjamin C.",
+ title = "Programming with Intersection Types and Bounded Polymorphism",
+ institution = "Carnegie Mellon University",
+ year = "1991",
+ comment = "CMU-CS-91-205",
+ abstract =
+ "Intersection types and bounded quantification are complementary
+ mechanisms for extending the expressive power of statically typed
+ programming languages. They begin with a common framework: a simple,
+ typed language with higher-order functions and a notion of subtyping.
+ Intersection types extend this framework by giving every pair of types
+ $\sigma$ and $\tau$ a greatest lower bound, $\sigma \land \tau$,
+ corresponding intuitively to the intersection of the sets of values
+ described by $\sigma$ and $\tau$. Bounded quantification extends the
+ basic framework along a different axis by adding polymorphic functions
+ that operate uniformly on all the subtypes of a given type. This thesis
+ unifies and extends prior work on intersection types and bounded
+ quantification, previously studied only in isolation, by investigating
+ theoretical and practical aspects of a typed $\lambda$-calculus
+ incorporating both.
+
+ The practical utility of this calculus, called $F_\land$ is
+ established by examples showing, for instance, that it allows a rich
+ form of ``coherent overloading'' and supports an analog of abstract
+ interpretation during typechecking; for example, the addition function
+ is given a type showing that it maps pairs of positive inputs to a
+ positive result, pairs of zero inputs to a zero result, etc. More
+ familiar programming examples are presented in terms of an extension
+ of Forsythe (an Algol-like language with intersection types),
+ demonstrating how parametric polymorphism can be used to simplify and
+ generalize Forsythe's design. We discuss the novel programming and
+ debugging styles that arise in $F_\land$.
+
+ We prove the correctness of a simple semi-decision procedure for the
+ subtype relation and the partial correctness of an algorithm for
+ synthesizing minimal types of $F_\land$ terms. Our main tool in this
+ analysis is a notion of ``canonical types,'' which allows proofs to be
+ factored so that intersections are handled separately from the other
+ type constructors.
+
+ A pair of negative results illustrates some subtle complexities of
+ $F_\land$. First, the subtype relation of $F_\land$ is shown to be
+ undecidable; in fact, even the subtype relation of pure second-order
+ bounded quantification is undecidable, a surprising result in its own
+ right. Second, the failure of an important technical property of the
+ subtype relation -- the existence of least upper bounds -- indicates
+ that typed semantic models of $F_\land$ will be more difficult to
+ construct and analyze than the known typed models of intersection
+ types. We propose, for future study, some simpler fragments of
+ $F_\land$ that share most of its essential features, while recovering
+ decidability and least upper bounds.
+
+ We study the semantics of $F_\land$ from several points of view. An
+ untyped model based on partial equivalence relations demonstrates the
+ consistency of the typing rules and provides a simple interpolation
+ for programs, where ``$\sigma$ is a subtype of $\tau$'' is read as
+ ``$\sigma$ is a subset of $\tau$.'' More refined models can be
+ obtained using a translation from $F_\land$ into the pure polymorphic
+ $\lambda$-calculus; in these models, ``$\sigma$ is a subtype of
+ $\tau$'' is interpreted by an explicit coercion function from $\sigma$
+ to $\tau$. The nonexistence of least upper bounds shows up here in
+ the failure of known techniques for proving the coherence of the
+ translation semantics. Finally, an equational theory of equivalences
+ between $F_\land$ terms is presented and its soundness for both styles
+ of model is verified.",
+ paper = "Pier91.pdf"
+}
+
+\end{chunk}
+
+\index{Pierce, Benjamin C.}
+\begin{chunk}{axiom.bib}
+@techreport{Pier91a,
+ author = "Pierce, Benjamin C.",
+ title = "Bounded Quantification is Undecidable",
+ year = "1991",
+ number = "CMU-CS-91-161",
+ link = "\url{http://repository.cmu.edu/cgi/viewcontent.cgi?article=3059}",
+ abstract =
+ "$F_\le$ is a typed $\lambda$-calculus with subtyping and bounded
+ second-order polymorphism. First introduced by Cardelli and Wegner, it
+ has been widely studied as a core calculus for type systems with
+ subtyping.
+
+ Curien and Ghelli proved the partial correctness of a recursive
+ procedure for computing minimal types of $F_\le$ terms and showed
+ that the termination of this procedure is equivalent to the
+ termination of its major component, a procedure for checking the
+ subtype relation between $F_\le$ types. Ghelli later claimed that
+ this procedure is also guaranteed to terminate, but the discovery of a
+ subtle bug in his proof led him recently to observe that, in fact,
+ there are inputs on which the subtyping procedure diverges. This
+ reopens the question of the decidability of subtyping and hence of
+ typechecking.
+
+ This question is settled here in the negative, using a reduction from
+ the halting problem for two-counter Turing machines to show that the
+ subtype relation of $F_\le$ is undecidable.",
+ paper = "Pier91a.pdf"
+}
+
+\end{chunk}
+
+\index{Poizat, B.}
+\begin{chunk}{axiom.bib}
+@misc{Poiz85,
+ author = "Poizat, B.",
+ title = {Cours de Th\'eorie des Mod\`eles},
+ comment = {Nur al-Mantiq wal-Ma'rifah, Villeurbanne, France},
+ year = "1985"
+}
+
+\end{chunk}
+
+\subsection{Q} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{R} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Rector, D. L.}
+\begin{chunk}{axiom.bib}
+@InCollection{Rect89,
+ author = "Rector, D. L.",
+ title = "Semantics in Algebraic Computation",
+ booktitle = "Computers and Mathematics",
+ publisher = "Springer-Verlag",
+ year = "1989",
+ pages = "299-307",
+ isbn = "0-387-97019-3",
+ abstract =
+ "I am interested in symbolic computation for theoretical research in
+ algebraic topology. Most algebraic computations in topology are hand
+ calculations; that is, they can be accomplished by the researcher in
+ times ranging from hours to weeks, and they are aimed at discovering
+ general patterns rather than producing specific formulas understood in
+ advance. Furthermore, the range of algebraic constructs used in such
+ calculations is very wide.",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
\index{AlgebraGivenByStructuralConstants}
\index{Reed, Mary Lynn}
\begin{chunk}{axiom.bib}
@@ -708,6 +2984,115 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Remy, Didier}
+\begin{chunk}{axiom.bib}
+@inproceedings{Remy89,
+ author = "Remy, Didier",
+ title = "Typechecking Records and Variants in a Natural Extension of ML",
+ booktitle = "POPL 89",
+ isbn = "978-0-89791-294-5",
+ publisher = "ACM",
+ link = "\url{https://www.cs.cmu.edu/~aldrich/courses/819/row.pdf}",
+ abstract =
+ "We describe an extension of ML with records where inheritance is
+ given by ML generic polymorphism. All common operations on records but
+ concatenation are supported, in particular, the free extension of
+ records. Other operations such as renaming of fields are added. The
+ solution relies on an extension of ML, where the language of types is
+ sorted and considered modulo equations, and on a record extension of
+ types. The solution is simple and modular and the type inference
+ algorithm is efficient in practice.",
+ paper = "Remy89.pdf"
+}
+
+\end{chunk}
+
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Reyn74,
+ author = "Reynolds, John C.",
+ title = "Towards a Theory of Type Structure",
+  booktitle = "Colloquium on Programming",
+ year = "1974",
+ pages = "9-11",
+ paper = "Reyn74.pdf"
+}
+
+\end{chunk}
+
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Reyn80,
+ author = "Reynolds, John C.",
+ title = "Using Category Theory to Design Implicit Conversions and
+ Generic Operators",
+ booktitle = "Lecture Notes in Computer Science",
+ year = "1980",
+ abstract =
+ "A generalization of many-sorted algebras, called category-sorted
+ algebras, is defined and applied to the language-design problem of
+ avoiding anomalies in the interaction of implicit conversions and
+ generic operators. The definition of a simple imperative language
+ (without any binding mechanisms) is used as an example.",
+ paper = "Reyn80.pdf"
+}
+
+\end{chunk}
+
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Reyn84,
+ author = "Reynolds, John C.",
+ title = "Polymorphism is not Set-theoretic",
+ booktitle = "Proc Semantics of Data Types",
+ pages = "145-156",
+ year = "1984",
+ link = "\url{https://hal.inria.fr/inria-00076261/document}",
+ abstract =
+ "The polymorphic, or second-order, typed lambda calculus is an
+ extension of the typed lambda calculus in which polymorphic functions
+  can be defined. In this paper it is shown that the standard set-theoretic model of
+ the ordinary typed lambda calculus cannot be extended to model this
+ language extension.",
+ paper = "Reyn84.pdf"
+}
+
+\end{chunk}
+
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Reyn91,
+ author = "Reynolds, John C.",
+ title = "The Coherence of Languages with Intersection Types",
+ booktitle = "TACS 91",
+ year = "1991",
+ abstract =
+ "When a programming language has a sufficiently rich type structure,
+ there can be more than one proof of the same typing judgement;
+ potentially this can lead to semantic ambiguity since the semantics of
+ a typed language is a function of such proofs. When no such ambiguity
+ arises, we say that the language is coherent. In this paper we prove
+ the coherence of a class of lambda-calculus-based languages that use
+ the intersection type discipline, including both a purely functional
+ programming language and the Algol-like programming language Forsythe.",
+ paper = "Reyn91.pdf"
+}
+
+\end{chunk}
+
+\index{Robinson, Derek J. S.}
+\begin{chunk}{axiom.bib}
+@book{Robi96,
+  author = "Robinson, Derek J. S.",
+ title = "A Course in the Theory of Groups",
+ year = "1996",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-1-4612-6443-9",
+ publisher = "Springer"
+}
+
+\end{chunk}
+
\index{Rolle, Michel}
\begin{chunk}{axiom.bib}
@misc{Roll1691,
@@ -724,6 +3109,81 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Rydeheard, D. E.}
+\index{Burstall, R. M.}
+\begin{chunk}{axiom.bib}
+@book{Ryde88,
+ author = "Rydeheard, D. E. and Burstall, R. M.",
+ title = "Computational Category Theory",
+ publisher = "Prentice Hall",
+ year = "1988",
+ isbn = "978-0131627369"
+}
+
+\end{chunk}
+
+\subsection{S} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Schmidt-Schauss, M.}
+\begin{chunk}{axiom.bib}
+@book{Schm89,
+ author = "Schmidt-Schauss, M.",
+ title = "Computational Aspects of an Order-Sorted Logic with Term
+ Declarations",
+ publisher = "Springer",
+ isbn = "978-3-540-51705-4",
+ year = "1989"
+}
+
+\end{chunk}
+
+\index{Schoenfinkel, M.}
+\begin{chunk}{axiom.bib}
+@misc{Scho24,
+ author = "Schoenfinkel, M.",
+  title = "{\"U}ber die Bausteine der mathematischen Logik",
+ year = "1924",
+ pages = "305-316"
+}
+
+\end{chunk}
+
+\index{Schubert, Horst}
+\begin{chunk}{axiom.bib}
+@book{Schu72,
+ author = "Schubert, Horst",
+ title = "Categories",
+ publisher = "Springer-Verlag",
+ year = "1972"
+}
+
+\end{chunk}
+
+\index{Siekmann, Jorg H.}
+\begin{chunk}{axiom.bib}
+@article{Siek89,
+ author = "Siekmann, Jorg H.",
+ title = "Unification Theory",
+ journal = "Journal of Symbolic Computation",
+ volume = "7",
+ number = "3-4",
+ year = "1989",
+ pages = "207-274",
+ abstract =
+ "Most knowledge based systems in artificial intelligence (AI), with a
+  commitment to a symbolic representation, support one basic operation:
+ ``matching of descriptions''. This operation, called unification in work
+ on deduction, is the ``addition-and-multiplication'' of AI-systems and
+ is consequently often supported by special purpose hardware or by a
+ fast instruction set on most AI-machines. Unification theory provides
+ the formal framework for investigations into the properties of this
+ operation. This article surveys what is presently known in unification
+ theory and records its early history.",
+ paper = "Siek89.pdf"
+}
+
+\end{chunk}
+
\index{PermutationGroup}
\index{Sims, Charles}
\begin{chunk}{axiom.bib}
@@ -740,6 +3200,44 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Smolka, G.}
+\begin{chunk}{axiom.bib}
+@article{Smol88a,
+ author = "Smolka, G.",
+ title = "Logic Programming with Polymorphically Order-sorted Types",
+ journal = "Lecture Notes in Computer Science",
+ volume = "343",
+ pages = "53-70",
+ year = "1988"
+}
+
+\end{chunk}
+\index{Smolka, G.}
+\index{Nutt, W.}
+\index{Goguen, J.}
+\index{Meseguer, J.}
+\begin{chunk}{axiom.bib}
+@InCollection{Smol89,
+ author = "Smolka, G. and Nutt, W. and Goguen, J. and Meseguer, J.",
+ title = "Order-sorted Equational Computation",
+  booktitle = "Resolution of Equations in Algebraic Structures (Vol 2)",
+ pages = "297-367",
+ year = "1989"
+}
+
+\end{chunk}
+
+\index{Smolka, G.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Smol89a,
+ author = "Smolka, G.",
+ title = "Logic Programming over Polymorphically Order-Sorted Types",
+  school = "Fachbereich Informatik, Universit{\"a}t Kaiserslautern",
+ year = "1989"
+}
+
+\end{chunk}
+
\begin{chunk}{axiom.bib}
@misc{Stac17,
author = "StackExchange",
@@ -750,6 +3248,88 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Stansifer, R.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Stan88,
+ author = "Stansifer, R.",
+ title = "Type Inference with Subtypes",
+ booktitle = "POPL 88",
+ pages = "88-97",
+ year = "1988",
+ abstract =
+ "We give an algorithm for type inference in a language with functions,
+ records, and variant records. A similar language was studied by
+ Cardelli who gave a type checking algorithm. This language is
+ interesting because it captures aspects of object-oriented programming
+ using subtype polymorphism. We give a type system for deriving types
+ of expressions in the language and prove the type inference algorithm
+ is sound, i.e., it returns a type derivable from the proof system. We
+ also prove that the type the algorithm finds is a ``principal'' type,
+ i.e., one which characterizes all others. The approach taken here is
+ due to Milner for universal polymorphism. The result is a synthesis of
+ subtype polymorphism and universal polymorphism.",
+ paper = "Stan88.pdf"
+}
+
+\end{chunk}
+
+\index{Strachey, Christopher}
+\begin{chunk}{axiom.bib}
+@article{Stra00,
+ author = "Strachey, Christopher",
+ title = "Fundamental Concepts in Programming Languages",
+ journal = "Higher-Order and Symbolic Computation",
+ volume = "13",
+ number = "1-2",
+ pages = "11-49",
+ year = "2000",
+ abstract =
+ "This paper forms the substance of a course of lectures given at the
+ International Summer School in Computer Programming at Copenhagen in
+ August, 1967. The lectures were originally given from notes and the
+ paper was written after the course was finished. In spite of this, and
+ only partly because of the shortage of time, the paper still retains
+ many of the shortcomings of a lecture course. The chief of these are
+  an uncertainty of aim---it is never quite clear what sort of audience
+  there will be for such lectures---and an associated switching from
+ formal to informal modes of presentation which may well be less
+ acceptable in print than it is natural in the lecture room. For these
+ (and other) faults, I apologise to the reader.
+
+ There are numerous references throughout the course to CPL [1–3]. This
+ is a programming language which has been under development since 1962
+ at Cambridge and London and Oxford. It has served as a vehicle for
+ research into both programming languages and the design of
+ compilers. Partial implementations exist at Cambridge and London. The
+ language is still evolving so that there is no definitive manual
+ available yet. We hope to reach another resting point in its evolution
+ quite soon and to produce a compiler and reference manuals for this
+ version. The compiler will probably be written in such a way that it
+  is relatively easy to transfer it to another machine, and in the first
+ instance we hope to establish it on three or four machines more or
+ less at the same time.
+
+ The lack of a precise formulation for CPL should not cause much
+ difficulty in this course, as we are primarily concerned with the
+ ideas and concepts involved rather than with their precise
+ representation in a programming language.",
+ paper = "Stra00.pdf"
+}
+
+\end{chunk}
+
+\index{Stroustrup, Bjarne}
+\begin{chunk}{axiom.bib}
+@book{Stro95,
+ author = "Stroustrup, Bjarne",
+ title = "The C++ Programming Language (2nd Edition)",
+ publisher = "Addison-Wesley",
+ year = "1995",
+ isbn = "0-201-53992-6"
+}
+
+\end{chunk}
+
\index{Sturm, Jacques Charles Francois}
\begin{chunk}{axiom.bib}
@article{Stur1829,
@@ -785,6 +3365,416 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\index{Szabo, P.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Szab82,
+ author = "Szabo, P.",
+ title = "Unifikationstheorie erster Ordnung",
+  school = {Fakult\"at f\"ur Informatik, Universit\"at Karlsruhe},
+ year = "1982"
+}
+
+\end{chunk}
+
+\subsection{T} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Temperini, M.}
+\begin{chunk}{axiom.bib}
+@misc{Temp92,
+ author = "Temperini, M.",
+ title = "Design and Implementation Methodologies for Symbolic
+ Computation Systems",
+ year = "1992",
+ comment = "Preprint"
+}
+
+\end{chunk}
+
+\index{Thatte, Satish R.}
+\begin{chunk}{axiom.bib}
+@article{That91,
+ author = "Thatte, Satish R.",
+ title = "Coercive Type Isomorphism",
+ journal = "LNCS",
+ volume = "523",
+ year = "1991",
+ pages = "29-49",
+ abstract =
+ "There is a variety of situations in programming in which it is useful
+ to think of two distinct types as representations of the same abstract
+ structure. However, language features which allow such relations to
+ be effectively expressed at an abstract level are lacking. We propose
+ a generalization of ML-style type inference to deal effectively with
+ this problem. Under the generalization, the (normally free) algebra
+ of type expressions is subjected to an equational theory generated by
+ a finite set of user-specified equations that express
+ interconvertibility relations between objects of ``equivalent'' types.
+ Each type equation is accompanied by a pair of conversion functions
+ that are (at least partial) inverses. We show that so long as the
+ equational theory satisfies a reasonably permissive syntactic
+  constraint, the resulting type system admits a complete type
+  inference algorithm that produces unique principal types. The main
+ innovation required in type inference is the replacement of ordinary
+ free unification by unification in the user-specified equational
+ theory. The syntactic constraint ensures that the latter is unitary,
+ i.e., yields unique most general unifiers. The proposed constraint is
+ of independent interest as the first known syntactic
+ characterization for a class of unitary theories. Some of the
+  applications of the system are similar to those of Wadler's views
+ [Wad87]. However, our system is considerably more general, and more
+ orthogonal to the underlying language.",
+ paper = "That91.pdf"
+}
+
+\end{chunk}
+
+\index{Tiuryn, J.}
+\begin{chunk}{axiom.bib}
+@article{Tiur90,
+ author = "Tiuryn, J.",
+ title = "Type Inference Problems -- A Survey",
+ journal = "LNCS",
+ volume = "452",
+ pages = "105-120",
+ year = "1990",
+ paper = "Tiur90.pdf"
+}
+
+\end{chunk}
+
+\index{Tiuryn, J.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Tiur92,
+ author = "Tiuryn, J.",
+ title = "Subtype Inequalities",
+ booktitle = "Proc. Logic in Computer Science 92",
+ year = "1992",
+ pages = "308-315",
+ abstract =
+ "In this paper we study the complexity of the satisfiability problem
+ for subtype inequalities in simple types. The naive algorithm which
+ solves this problem runs in non-deterministic exponential time for
+ every pre-defined poset of atomic subtypings. In this paper we show
+ that over certain finite posets of atomic subtypings the
+ satisfiability problem for subtype inequalities is PSPACE-hard. On
+ the other hand we prove that if the poset of atomic subtypings is a
+ disjoint union of lattices, then the satisfiability problem for
+ subtype inequalities is solvable in PTIME. This result covers the
+ important special case of the unification problem which can be
+ obtained when the atomic subtype relation is equality (in this case
+ the poset is a union of one-element lattices).",
+ paper = "Tiur92.pdf"
+}
+
+\end{chunk}
+
+\index{Turner, D.A.}
+\begin{chunk}{axiom.bib}
+@article{Turn85,
+ author = "Turner, D. A.",
+ title = "Miranda: A non-strict functional language with polymorphic types",
+ journal = "Lecture Notes in Computer Science",
+ volume = "201",
+ pages = "1-16",
+ year = "1985",
+ link = "\url{http://miranda.org.uk/nancy.html}",
+ paper = "Turn85.pdf"
+}
+
+\end{chunk}
+
+\index{Turner, D.A.}
+\begin{chunk}{axiom.bib}
+@article{Turn86,
+ author = "Turner, D. A.",
+ title = "An Overview of Miranda",
+ journal = "SIGPLAN Notices",
+ volume = "21",
+ number = "12",
+ pages = "158-166",
+ year = "1986",
+ link = "\url{http://miranda.org.uk/}",
+ paper = "Turn86.pdf"
+}
+
+\end{chunk}
+
+\subsection{U} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{V} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Volpano, Dennis M.}
+\index{Smith, Geoffrey S.}
+\begin{chunk}{axiom.bib}
+@techreport{Volp91,
+  author = "Volpano, Dennis M. and Smith, Geoffrey S.",
+ title = "On the Complexity of ML Typability with Overloading",
+ institution = "Cornell University",
+ year = "1991",
+ number = "TR91-1210",
+ abstract =
+ "We examine the complexity of type checking in an ML-style type system
+ that permits functions to be overloaded with different types. In
+ particular, we consider the extension of the ML Type system proposed
+ by Wadler and Blott in the appendix of [WB89], with global overloading
+ only, that is, where the only overloading is that which exists in an
+ initial type assumption set; no local overloading via over and inst
+ expressions is allowed. It is shown that under a correct notion of
+ well-typed terms, the problem of determining whether a term is well
+ typed with respect to an assumption set in this system is
+ undecidable. We then investigate limiting recursion in assumption
+ sets, the source of the undecidability. Barring mutual recursion is
+ considered, but this proves too weak, for the problem remains
+ undecidable. Then we consider a limited form of recursion called
+ parametric recursion. We show that although the problem becomes
+ decidable under parametric recursion, it appears harder than
+ conventional ML typability, which is complete for DEXPTIME [Mai90].",
+ paper = "Volp91.pdf"
+}
+
+\end{chunk}
+
+\subsection{W} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Waldmann, Uwe}
+\begin{chunk}{axiom.bib}
+@article{Wald92,
+ author = "Waldmann, Uwe",
+ title = "Semantics of Order-sorted Specifications",
+ journal = "Theoretical Computer Science",
+ volume = "94",
+ number = "1-2",
+ year = "1992",
+ pages = "1-35",
+ abstract =
+ "Order-sorted specifications (i.e. many-sorted specifications with
+ subsort relations) have been proved to be a useful tool for the
+ description of partially defined functions and error handling in
+ abstract data types.
+
+ Several definitions for order-sorted algebras have been proposed. In
+ some papers an operator symbol, which may be multiply declared, is
+  interpreted by a family of functions (``overloaded'' algebras). In other
+  papers it is always interpreted by a single function (``nonoverloaded''
+ algebras). On the one hand, we try to demonstrate the differences
+ between these two approaches with respect to equality, rewriting and
+ completion; on the other hand, we prove that in fact both theories can
+ be studied in parallel provided that certain notions are suitably
+ defined.
+
+ The overloaded approach differs from the many-sorted and the
+ nonoverloaded one in that the overloaded term algebra is not
+ necessarily initial. We give a decidable sufficient criterion for the
+ initiality of the term algebra, which is less restrictive than
+ GJM-regularity as proposed by Goguen, Jouannaud and Meseguer.
+
+ Sort-decreasingness is an important property of rewrite systems since
+ it ensures that confluence and Church-Rosser property are equivalent,
+ that the overloaded and nonoverloaded rewrite relations agree, and
+ that variable overlaps do not yield critical pairs. We prove that it
+ is decidable whether or not a rewrite rule is sort-decreasing, even if
+ the signature is not regular.
+
+ Finally, we demonstrate that every overloaded completion procedure may
+ also be used in the nonoverloaded world, but not conversely, and that
+ specifications exist that can only be completed using the
+ nonoverloaded semantics.",
+ paper = "Wald92.pdf"
+}
+
+\end{chunk}
+
+\index{Wand, Mitchell}
+\begin{chunk}{axiom.bib}
+@inproceedings{Wand87,
+ author = "Wand, Mitchell",
+ title = "Complete Type Inference for Simple Objects",
+ booktitle = "Symp. on Logic in Computer Science",
+ year = "1987",
+ pages = "22-25",
+ abstract =
+ "The problem of strong typing is considered for a model of
+ object-oriented programming systems. These systems permit values which
+ are records of other values, and in which fields inside these records
+ are retrieved by name. A type system is proposed that permits
+ classification of these kinds of values and programs by the type of
+ their result, as is usual in strongly-typed programming languages. The
+ type system has two important properties: it admits multiple
+ inheritance, and it has a syntactically complete type inference system.",
+ paper = "Wand87.pdf"
+}
+
+\end{chunk}
+
+\index{Wand, Mitchell}
+\begin{chunk}{axiom.bib}
+@inproceedings{Wand88,
+ author = "Wand, Mitchell",
+ title = "Corrigendum: Complete Type Inference for Simple Objects",
+ booktitle = "Symp. on Logic in Computer Science",
+ year = "1988",
+ pages = "5-8",
+ abstract =
+ "An error has been pointed out in the author's paper (see Proc. 2nd
+ IEEE Symp. on Logic in Computer Science, p 37-44 (1987)). It appears
+ that there are programs without principal type schemes in the
+ system in that paper."
+}
+
+\end{chunk}
+
+\index{Wand, Mitchell}
+\begin{chunk}{axiom.bib}
+@inproceedings{Wand89,
+ author = "Wand, Mitchell",
+ title = "Type Inference for Record Concatenation and Multiple
+ Inheritance",
+ booktitle = "Logic in Computer Science",
+ year = "1989",
+ isbn = "0-8186-1954-6",
+ abstract =
+ "The author shows that the type inference problem for a lambda
+ calculus with records, including a record concatenation operator, is
+ decidable. He shows that this calculus does not have principal types
+ but does have finite complete sets of type, that is, for any term M in
+ the calculus, there exists an effectively generable finite set of type
+ schemes such that every typing for M is an instance of one of the
+ schemes in the set. The author shows how a simple model of
+ object-oriented programming, including hidden instance variables and
+ multiple inheritance, may be coded in this calculus. The author
+ concludes that type inference is decidable for object-oriented
+ programs, even with multiple inheritance and classes as first-class
+ values.",
+ paper = "Wand89.pdf"
+}
+
+\end{chunk}
+
+\index{Wand, Mitchell}
+\begin{chunk}{axiom.bib}
+@article{Wand91,
+ author = "Wand, Mitchell",
+ title = "Type Inference for Record Concatenation and Multiple
+ Inheritance",
+ journal = "Information and Computation",
+ volume = "93",
+  number = "1",
+ year = "1991",
+ pages = "1-15",
+ abstract =
+ "We show that the type inference problem for a lambda calculus with
+ records, including a record concatenation operator, is decidable. We
+ show that this calculus does not have principal types, but does have
+ finite complete sets of types: that is, for any term M in the
+ calculus, there exists an effectively generable finite set of type
+ schemes such that every typing for M is an instance of one of the
+ schemes in the set. We show how a simple model of object-oriented
+ programming, including hidden instance variables and multiple
+ inheritance, may be coded in this calculus. We conclude that type
+ inference is decidable for object-oriented programs, even with
+ multiple inheritance and classes as first-class values.",
+ paper = "Wand91.pdf"
+}
+
+\end{chunk}
+
+\index{Weber, Andreas}
+\begin{chunk}{axiom.bib}
+@techreport{Webe92b,
+ author = "Weber, Andreas",
+ title = "Structuring the Type System of a Computer Algebra System",
+ link = "\url{http://cg.cs.uni-bonn.de/personal-pages/weber/publications/pdf/WeberA/Weber92a.pdf}",
+  institution = "Wilhelm-Schickard-Institut f{\"u}r Informatik",
+ year = "1992",
+ abstract =
+ "Most existing computer algebra systems are pure symbol manipulating
+  systems without language support for the occurring types. This is
+  mainly due to the fact that the occurring types are much more
+ complicated than in traditional programming languages. In the last
+ decade the study of type systems has become an active area of
+ research. We will give a proposal for a type system showing that
+ several problems for a type system of a symbolic computation system
+ can be solved by using results of this research. We will also provide
+ a variety of examples which will show some of the problems that remain
+ and that will require further research.",
+ paper = "Webe92b.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Wirsing, Martin}
+\index{Broy, Manfred}
+\begin{chunk}{axiom.bib}
+@inproceedings{Wirs82,
+ author = "Wirsing, Martin and Broy, Manfred",
+ title = "An Analysis of Semantic Models for Algebraic Specifications",
+ booktitle = "Theoretical Foundations of Programming Methodology",
+ year = "1982",
+ publisher = "Springer",
+ pages = "351-413",
+ isbn = "978-94-009-7893-5",
+ abstract =
+ "Data structures, algorithms and programming languages can be
+ described in a uniform implementation-independent way by axiomatic
+ abstract data types i.e. by algebraic specifications defining
+ abstractly the properties of objects and functions. Different semantic
+ models such as initial and terminal algebras have been proposed in
+  order to specify the meaning of such specifications---often involving a
+ considerable amount of category theory. A more concrete semantics
+ encompassing these different approaches is presented:
+
+ Abstract data types are specified in hierarchies, employing
+ ``primitive'' types on which other types are based. The semantics is
+ defined to be the class of all partial heterogeneous algebras
+ satisfying the axioms and respecting the hierarchy. The interpretation
+ of a specification as its initial or terminal algebra is just a
+ constraint on the underlying data. These constraints can be modified
+ according to the specification goals. E.g. the data can be specified
+ using total functions; for algorithms partial functions with
+ syntactically checkable domains seem appropriate whereas for
+ programming languages the general notion of partiality is needed,
+ Model-theoretic and deduction-oriented conditions are developed which
+ ensure properties leading to criteria for the soundness and complexity
+ of specifications. These conditions are generalized to parameterized
+ types, i.e. type procedures mapping types into types. Syntax and
+ different semantics of parameter are defined and discussed. Criteria
+ for proper parameterized specifications are developed. It is shown
+ that the properties of proper specifications viz. of snowballing and
+ impeccable types are preserved under application of parameterized
+  types---finally guaranteeing that the composition of proper small
+ specifications always leads to a proper large specification."
+}
+
+\end{chunk}
+
+\index{Wirsing, Martin}
+\begin{chunk}{axiom.bib}
+@InCollection{Wirs91,
+ author = "Wirsing, Martin",
+ title = "Algebraic Specification",
+ booktitle = "Handbook of Theoretical Computer Science (Vol B)",
+ publisher = "MIT Press",
+ year = "1991",
+ pages = "675-788",
+ chapter = "13",
+ isbn = "0-444-88074-7"
+}
+
+\end{chunk}
+
+\index{Wolfram, Stephen}
+\begin{chunk}{axiom.bib}
+@book{Wolf91,
+ author = "Wolfram, Stephen",
+ title = "Mathematica: A System for Doing Mathematics by Computer",
+ publisher = "Addison-Wesley",
+ isbn = "978-0201515022",
+ year = "1991"
+}
+
+\end{chunk}
+
\index{AlgebraGivenByStructuralConstants}
\index{W\"orz-Busekros, A.}
\begin{chunk}{axiom.bib}
@@ -842,6 +3832,26 @@ paragraph for those unfamiliar with the terms.
\end{chunk}
+\subsection{X} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{Y} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{Z} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Zariski, Oscar}
+\index{Samuel, Pierre}
+\begin{chunk}{axiom.bib}
+@book{Zari75,
+ author = "Zariski, Oscar and Samuel, Pierre",
+ title = "Commutative Algebra",
+  series = "Graduate Texts in Mathematics",
+ year = "1975",
+ publisher = "Springer-Verlag",
+ isbn = "978-0387900896"
+}
+
+\end{chunk}
+
\section{Linear Algebra} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{matrix sparse}
@@ -37014,7 +40024,8 @@ IBM T. J. Watson Research Center (2001)
We will give a quite simple example of a family of types arising in
computer algebra whose coercion relations cannot be captured by a
finite set of first-order rewrite rules.",
- paper = "Webe05.pdf"
+ paper = "Webe05.pdf",
+ keywords = "axiomref"
}
\end{chunk}
@@ -37553,12 +40564,39 @@ Oxford University Press (2000) ISBN0-19-512516-9
\end{chunk}
\index{Yun, David Y.Y}
-\begin{chunk}{ignore}
-\bibitem[Yun 83]{Yun83} Yun, David Y.Y.
+\begin{chunk}{axiom.bib}
+@inproceedings{Yunx83,
+ author = "Yun, David Y.Y",
title = "Computer Algebra and Complex Analysis",
-Computational Aspects of Complex Analysis pp379-393
-D. Reidel Publishing Company H. Werner et. al. (eds.)
+ booktitle = "Computational Aspects of Complex Analysis",
+ pages = "379-393",
+ publisher = "D. Reidel Publishing Company",
+ year = "1983",
+ abstract =
+ "Taking complex analysis to mean complex numerical analysis, I
+ perceive my mission here to be that of disseminating the algebraic
+ approach taken by computer algebraists to many mathematical problems,
+ which arise from and are important to complex analysis. In turn,
+ complex numerical analysis can be, and have been, providing essential
+ theoretical and computational results for computer algebra. The cross
+ fertilization should and must continue in order that computational
+ mathematics progress with the joint aid of both tools, rather than
+ branching into orthogonal pursuits with disparate approaches. First,
+ we discuss the different issues and principal concerns of computer
+ algebra. Then, the algebraic approach to a long standing problem in
+ calculus or complex analysis, indefinite integration in closed form,
+ will be motivated and derived through examples. Algorithmic solution
+ to the basic, thought provoking, problem of rational function
+ integration as well as theoretical foundation underlying the algorithm
+ for elementary function integration will be discussed. Further issues
+ and approaches will be illustrated through another central (implicitly
+ essential) problem of computer algebra, that is simplification of
+ symbolic and algebraic expressions. We conclude by showing a set of
+ computer executed problems in integration to reveal some of the new
+ capabilities added to the arsenal of a mathematician through the
+ efforts of computer algebra.",
keywords = "axiomref"
+}
\end{chunk}
diff --git a/books/bookvolbug.pamphlet b/books/bookvolbug.pamphlet
index 97a5902..e6a9d8a 100644
--- a/books/bookvolbug.pamphlet
+++ b/books/bookvolbug.pamphlet
@@ -6,7 +6,7 @@
\chapter{Introduction}
\section{The Numbering Scheme}
\begin{verbatim}
-bug 7326:
+bug 7335:
todo 342:
wish 1012:
meh 5:
@@ -18,6 +18,107 @@ nonextend 60077:
\end{verbatim}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7334: coerce missing from EXPR(Quaternion (Complex (Float)))}
+\begin{verbatim}
+
+t1:Quaternion Complex Expression Integer:=quatern(1.1,1.2,1.3,1.4)
+Function: coerce : Quaternion(Complex(Float)) -> % is missing from domain:
+ Expression(Quaternion(Complex(Float)))
+ Internal Error
+ The function coerce with signature $(Quaternion (Complex (Float))) is
+ missing from domain Expression(Quaternion (Complex (Float)))
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7333: elt: index out of range}
+\begin{verbatim}
+
+-- fix bug 7324
+bmt:=zero(2,2)$Matrix(POLY PF 2)
+determinant bmt
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7332: cannot be coerced to mode (SUP(INT))}
+\begin{verbatim}
+
+-- regression test of bug 7298: coercion to SUP failure in factor
+-- fixed by 20150126.02.wxh.patch
+
+rh1:=(4*x^3+2*y^2+1)*(12*x^5-x^3*y+12)*(x+1)*(y^2+3)*(x^2-1)
+rh2:=factor(rh1)
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7331: raise an error but works interpreted}
+\begin{verbatim}
+
+f==n+->sum(sum(1/i,i=1..j),j=1..n)
+f(3)
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7330: does not modify emptybst}
+\begin{verbatim}
+
+emptybst := empty()$BSTREE(INT)
+t1 := insert!(8,emptybst)
+t1
+emptybst
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7329: fails}
+\begin{verbatim}
+
+lm := [3,5,7,11]
+t := balancedBinaryTree(#lm, 0)
+setleaves!(t,lm)
+mapDown!(t,12,rem)
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7328: delete! does not delete first table item}
+\begin{verbatim}
+
+Data := Record(monthsOld : Integer, gender : String)
+al : AssociationList(String,Data) := table()
+al."bob" := [407,"male"]$Data
+al."judith" := [366,"female"]$Data
+al
+delete!(al,1)
+al
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7327: This truncates the list}
+\begin{verbatim}
+
+l := [1,4,2,-6,0,3,5,4,2,3]
+reverse! l
+l
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{bug 7326: insert! does not modify the list}
+\begin{verbatim}
+
+)d op insert!
+
+This modifies the list
+l := [1,4,2,-6,0,3,5,4,2,3]
+insert!(10,l,4)
+l
+
+But this does not
+l := [1,4,2,-6,0,3,5,4,2,3]
+insert!(2,l,1)
+l
+
+\end{verbatim}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{book0 Jenks}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/books/catmac.sty b/books/catmac.sty
new file mode 100644
index 0000000..7df2f51
--- /dev/null
+++ b/books/catmac.sty
@@ -0,0 +1,1013 @@
+% This should appear in a file named catmac.sty
+% Copyright 1988,1989 Michael Barr
+% Department of Mathematics and Statistics
+% McGill University
+% 805 Sherbrooke St., W
+% Montreal, Quebec, Canada
+% H3P 1S4
+%
+% inhb@mcgillb.bitnet
+%
+% All commercial rights reserved. May be freely distributed
+% and used with the following exceptions:
+% 1. No commercial use without explicit permission.
+% 2. It may not be used by any employee of a telephone
+% company.
+% 3. It may not be distributed without this notice.
+%
+% Last revised 89-12-11
+
+
+\newcount \coefa
+\newcount \coefb
+\newcount \coefc
+\newcount\tempcounta
+\newcount\tempcountb
+\newcount\tempcountc
+\newcount\tempcountd
+\newcount\xext
+\newcount\yext
+\newcount\xoff
+\newcount\yoff
+\newcount\gap%
+\newcount\arrowtypea
+\newcount\arrowtypeb
+\newcount\arrowtypec
+\newcount\arrowtyped
+\newcount\arrowtypee
+\newcount\height
+\newcount\width
+\newcount\xpos
+\newcount\ypos
+\newcount\run
+\newcount\rise
+\newcount\arrowlength
+\newcount\halflength
+\newcount\arrowtype
+\newdimen\tempdimen
+\newdimen\xlen
+\newdimen\ylen
+\newsavebox{\tempboxa}%
+\newsavebox{\tempboxb}%
+\newsavebox{\tempboxc}%
+
+
+\newcount \repetitions
+\def\repto#1{%
+\mathrel{
+\hbox{$
+\repetitions=0%
+\loop\ifnum\repetitions<#1%
+ \advance \repetitions by 1%
+ \relbar\joinrel
+\repeat
+\rightarrow
+$}}}
+\def\to{\repto0}
+\def\To{\repto1}
+\def\two{\reptwo0}
+\def\Two{\reptwo1}
+\def\tofro{\mathrel{\vcenter{\hbox{\oalign
+{$\longrightarrow$\crcr$\longleftarrow$}}}}}
+
+\def\epi{\mathrel{\mathchar"221\mkern -12mu\mathchar"221}}
+\def\leftepi{\mathrel{\mathchar"220\mkern -12mu\mathchar"220}}
+\def\mon{\mathrel{\m@th\hbox to
+ 14.6pt{\lasyb\char'51\hskip-2.1pt$\arrext$\hss
+$\mathord\rightarrow$}}} % width of \epi
+\def\leftmono{\mathrel{\m@th\hbox to
+14.6pt{$\mathord\leftarrow$\hss$\arrext$\hskip-2.1pt\lasyb\char'50%
+}}} % width of \epi
+\font\lasyb=lasyb10 scaled \magstephalf % for \mon
+\mathchardef\arrext="0200 % amr minus for arrow extension (see \into)
+
+
+\setlength{\unitlength}{.01em}%
+\def\settypes(#1,#2,#3){\arrowtypea#1 \arrowtypeb#2 \arrowtypec#3}
+\def\settoheight#1#2{\setbox\@tempboxa\hbox{#2}#1\ht\@tempboxa\relax}%
+\def\settodepth#1#2{\setbox\@tempboxa\hbox{#2}#1\dp\@tempboxa\relax}%
+\def\settokens[#1`#2`#3`#4]{%
+ \def\tokena{#1}\def\tokenb{#2}\def\tokenc{#3}\def\tokend{#4}}
+\def\setsqparms[#1`#2`#3`#4;#5`#6]{%
+\arrowtypea #1
+\arrowtypeb #2
+\arrowtypec #3
+\arrowtyped #4
+\width #5
+\height #6
+}
+\def\setpos(#1,#2){\xpos=#1 \ypos#2}
+
+\def\bfig{\begin{picture}(\xext,\yext)(\xoff,\yoff)}
+\def\efig{\end{picture}}
+
+\def\putbox(#1,#2)#3{\put(#1,#2){\makebox(0,0){$#3$}}}
+
+\def\settriparms[#1`#2`#3;#4]{\settripairparms[#1`#2`#3`1`1;#4]}%
+
+\def\settripairparms[#1`#2`#3`#4`#5;#6]{%
+\arrowtypea #1
+\arrowtypeb #2
+\arrowtypec #3
+\arrowtyped #4
+\arrowtypee #5
+\width #6
+\height #6
+}
+
+\def\resetparms{\settripairparms[1`1`1`1`1;500]\width 500}%default values%
+
+\resetparms
+
+\def\mvector(#1,#2)#3{%%
+\put(0,0){\vector(#1,#2){#3}}%
+\put(0,0){\vector(#1,#2){30}}%
+}
+\def\evector(#1,#2)#3{{%%
+\arrowlength #3
+\put(0,0){\vector(#1,#2){\arrowlength}}%
+\advance \arrowlength by-30
+\put(0,0){\vector(#1,#2){\arrowlength}}%
+}}
+
+\def\horsize#1#2{%
+\settowidth{\tempdimen}{$#2$}%
+#1=\tempdimen
+\divide #1 by\unitlength
+}
+
+\def\vertsize#1#2{%
+\settoheight{\tempdimen}{$#2$}%
+#1=\tempdimen
+\settodepth{\tempdimen}{$#2$}%
+\advance #1 by\tempdimen
+\divide #1 by\unitlength
+}
+
+\def\vertadjust[#1`#2`#3]{%
+\vertsize{\tempcounta}{#1}%
+\vertsize{\tempcountb}{#2}%
+\ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb \fi
+\divide\tempcounta by2
+\vertsize{\tempcountb}{#3}%
+\ifnum \tempcountb>0 \advance \tempcountb by20 \fi
+\ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb \fi
+}
+
+\def\horadjust[#1`#2`#3]{%
+\horsize{\tempcounta}{#1}%
+\horsize{\tempcountb}{#2}%
+\ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb \fi
+\divide\tempcounta by20
+\horsize{\tempcountb}{#3}%
+\ifnum \tempcountb>0 \advance \tempcountb by60 \fi
+\ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb \fi
+}
+
+% In this procedure, #1 is the parameter that sticks out all the way,
+% #2 sticks out the least and #3 is a label sticking out half way. #4 is
+% the amount of the offset.
+
+\def\sladjust[#1`#2`#3]#4{%
+\tempcountc=#4
+\horsize{\tempcounta}{#1}%
+\divide \tempcounta by2
+\horsize{\tempcountb}{#2}%
+\divide \tempcountb by2
+\advance \tempcountb by-\tempcountc
+\ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb\fi
+\divide \tempcountc by2
+\horsize{\tempcountb}{#3}%
+\advance \tempcountb by-\tempcountc
+\ifnum \tempcountb>0 \advance \tempcountb by80\fi
+\ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb\fi
+\advance\tempcounta by20
+}
+
+\def\putvector(#1,#2)(#3,#4)#5#6{{%
+\xpos=#1
+\ypos=#2
+\run=#3
+\rise=#4
+\arrowlength=#5
+\arrowtype=#6
+\ifnum \arrowtype<0
+ \ifnum \run=0
+ \advance \ypos by-\arrowlength
+ \else
+ \tempcounta \arrowlength
+ \multiply \tempcounta by\rise
+ \divide \tempcounta by\run
+ \ifnum\run>0
+ \advance \xpos by\arrowlength
+ \advance \ypos by\tempcounta
+ \else
+ \advance \xpos by-\arrowlength
+ \advance \ypos by-\tempcounta
+ \fi
+ \fi
+ \multiply \arrowtype by-1
+ \multiply \rise by-1
+ \multiply \run by-1
+\fi
+\ifnum \arrowtype=1
+ \put(\xpos,\ypos){\vector(\run,\rise){\arrowlength}}%
+\else\ifnum \arrowtype=2
+ \put(\xpos,\ypos){\mvector(\run,\rise)\arrowlength}%
+\else\ifnum\arrowtype=3
+ \put(\xpos,\ypos){\evector(\run,\rise){\arrowlength}}%
+\fi\fi\fi
+}}
+
+\def\putsplitvector(#1,#2)#3#4{%%
+\xpos #1
+\ypos #2
+\arrowtype #4
+\halflength #3
+\arrowlength #3
+\gap 140
+\advance \halflength by-\gap
+\divide \halflength by2
+\ifnum \arrowtype=1
+ \put(\xpos,\ypos){\line(0,-1){\halflength}}%
+ \advance\ypos by-\halflength
+ \advance\ypos by-\gap
+ \put(\xpos,\ypos){\vector(0,-1){\halflength}}%
+\else\ifnum \arrowtype=2
+ \put(\xpos,\ypos){\line(0,-1)\halflength}%
+ \put(\xpos,\ypos){\vector(0,-1)3}%
+ \advance\ypos by-\halflength
+ \advance\ypos by-\gap
+ \put(\xpos,\ypos){\vector(0,-1){\halflength}}%
+\else\ifnum\arrowtype=3
+ \put(\xpos,\ypos){\line(0,-1)\halflength}%
+ \advance\ypos by-\halflength
+ \advance\ypos by-\gap
+ \put(\xpos,\ypos){\evector(0,-1){\halflength}}%
+\else\ifnum \arrowtype=-1
+ \advance \ypos by-\arrowlength
+ \put(\xpos,\ypos){\line(0,1){\halflength}}%
+ \advance\ypos by\halflength
+ \advance\ypos by\gap
+ \put(\xpos,\ypos){\vector(0,1){\halflength}}%
+\else\ifnum \arrowtype=-2
+ \advance \ypos by-\arrowlength
+ \put(\xpos,\ypos){\line(0,1)\halflength}%
+ \put(\xpos,\ypos){\vector(0,1)3}%
+ \advance\ypos by\halflength
+ \advance\ypos by\gap
+ \put(\xpos,\ypos){\vector(0,1){\halflength}}%
+\else\ifnum\arrowtype=-3
+ \advance \ypos by-\arrowlength
+ \put(\xpos,\ypos){\line(0,1)\halflength}%
+ \advance\ypos by\halflength
+ \advance\ypos by\gap
+ \put(\xpos,\ypos){\evector(0,1){\halflength}}%
+\fi\fi\fi\fi\fi\fi
+}
+
+\def\putmorphism(#1)(#2,#3)[#4`#5`#6]#7#8#9{{%
+\run #2
+\rise #3
+\ifnum\rise=0
+ \puthmorphism(#1)[#4`#5`#6]{#7}{#8}{#9}%
+\else\ifnum\run=0
+ \putvmorphism(#1)[#4`#5`#6]{#7}{#8}{#9}%
+\else
+\setpos(#1)%
+\arrowlength #7
+\arrowtype #8
+\ifnum\run=0
+\else\ifnum\rise=0
+\else
+\ifnum\run>0
+ \coefa=1
+\else
+ \coefa=-1
+\fi
+\ifnum\arrowtype>0
+ \coefb=0
+ \coefc=-1
+\else
+ \coefb=\coefa
+ \coefc=1
+ \arrowtype=-\arrowtype
+\fi
+\width=2
+\multiply \width by\run
+\divide \width by\rise
+\ifnum \width<0 \width=-\width\fi
+\advance\width by60
+\if l#9 \width=-\width\fi
+\putbox(\xpos,\ypos){#4}% %node 1
+{\multiply \coefa by\arrowlength% %node 2
+\advance\xpos by\coefa
+\multiply \coefa by\rise
+\divide \coefa by\run
+\advance \ypos by\coefa
+\putbox(\xpos,\ypos){#5} }%
+{\multiply \coefa by\arrowlength% %label
+\divide \coefa by2
+\advance \xpos by\coefa
+\advance \xpos by\width
+\multiply \coefa by\rise
+\divide \coefa by\run
+\advance \ypos by\coefa
+\if l#9%
+ \put(\xpos,\ypos){\makebox(0,0)[r]{$#6$}}%
+\else\if r#9%
+ \put(\xpos,\ypos){\makebox(0,0)[l]{$#6$}}%
+\fi\fi }%
+{\multiply \rise by-\coefc% %arrow
+\multiply \run by-\coefc
+\multiply \coefb by\arrowlength
+\advance \xpos by\coefb
+\multiply \coefb by\rise
+\divide \coefb by\run
+\advance \ypos by\coefb
+\multiply \coefc by70
+\advance \ypos by\coefc
+\multiply \coefc by\run
+\divide \coefc by\rise
+\advance \xpos by\coefc
+\multiply \coefa by140
+\multiply \coefa by\run
+\divide \coefa by\rise
+\advance \arrowlength by\coefa
+\ifnum \arrowtype=1
+ \put(\xpos,\ypos){\vector(\run,\rise){\arrowlength}}%
+\else\ifnum\arrowtype=2
+ \put(\xpos,\ypos){\mvector(\run,\rise){\arrowlength}}%
+\else\ifnum\arrowtype=3
+ \put(\xpos,\ypos){\evector(\run,\rise){\arrowlength}}%
+\fi\fi\fi}\fi\fi\fi\fi}}
+
+\def\puthmorphism(#1,#2)[#3`#4`#5]#6#7#8{{%
+\xpos #1
+\ypos #2
+\width #6
+\arrowlength #6
+\putbox(\xpos,\ypos){#3\vphantom{#4}}%
+{\advance \xpos by\arrowlength
+\putbox(\xpos,\ypos){\vphantom{#3}#4}}%
+\horsize{\tempcounta}{#3}%
+\horsize{\tempcountb}{#4}%
+\divide \tempcounta by2
+\divide \tempcountb by2
+\advance \tempcounta by30
+\advance \tempcountb by30
+\advance \xpos by\tempcounta
+\advance \arrowlength by-\tempcounta
+\advance \arrowlength by-\tempcountb
+\putvector(\xpos,\ypos)(1,0){\arrowlength}{#7}%
+\divide \arrowlength by2
+\advance \xpos by\arrowlength
+\vertsize{\tempcounta}{#5}%
+\divide\tempcounta by2
+\advance \tempcounta by20
+\if a#8 %
+ \advance \ypos by\tempcounta
+ \putbox(\xpos,\ypos){#5}%
+\else
+ \advance \ypos by-\tempcounta
+ \putbox(\xpos,\ypos){#5}%
+\fi}}
+
+\def\putvmorphism(#1,#2)[#3`#4`#5]#6#7#8{{%
+\xpos #1
+\ypos #2
+\arrowlength #6
+\arrowtype #7
+\settowidth{\xlen}{$#5$}%
+\putbox(\xpos,\ypos){#3}%
+{\advance \ypos by-\arrowlength
+\putbox(\xpos,\ypos){#4}}%
+{\advance\arrowlength by-140
+\advance \ypos by-70
+\ifdim\xlen>0pt
+ \if m#8%
+ \putsplitvector(\xpos,\ypos){\arrowlength}{\arrowtype}%
+ \else
+ \putvector(\xpos,\ypos)(0,-1){\arrowlength}{\arrowtype}%
+ \fi
+\else
+ \putvector(\xpos,\ypos)(0,-1){\arrowlength}{\arrowtype}%
+\fi}%
+\ifdim\xlen>0pt
+ \divide \arrowlength by2
+ \advance\ypos by-\arrowlength
+ \if l#8%
+ \advance \xpos by-40
+ \put(\xpos,\ypos){\makebox(0,0)[r]{$#5$}}%
+ \else\if r#8%
+ \advance \xpos by40
+ \put(\xpos,\ypos){\makebox(0,0)[l]{$#5$}}%
+ \else
+ \putbox(\xpos,\ypos){#5}%
+ \fi\fi
+\fi
+}}
+
+\def\topadjust[#1`#2`#3]{%
+\yoff=10
+\vertadjust[#1`#2`{#3}]%
+\advance \yext by\tempcounta
+\advance \yext by 10
+}
+\def\botadjust[#1`#2`#3]{%
+\vertadjust[#1`#2`{#3}]%
+\advance \yext by\tempcounta
+\advance \yoff by-\tempcounta
+}
+\def\leftadjust[#1`#2`#3]{%
+\xoff=0
+\horadjust[#1`#2`{#3}]%
+\advance \xext by\tempcounta
+\advance \xoff by-\tempcounta
+}
+\def\rightadjust[#1`#2`#3]{%
+\horadjust[#1`#2`{#3}]%
+\advance \xext by\tempcounta
+}
+\def\rightsladjust[#1`#2`#3]{%
+\sladjust[#1`#2`{#3}]{\width}%
+\advance \xext by\tempcounta
+}
+\def\leftsladjust[#1`#2`#3]{%
+\xoff=0
+\sladjust[#1`#2`{#3}]{\width}%
+\advance \xext by\tempcounta
+\advance \xoff by-\tempcounta
+}
+\def\adjust[#1`#2;#3`#4;#5`#6;#7`#8]{%
+\topadjust[#1``{#2}]
+\leftadjust[#3``{#4}]
+\rightadjust[#5``{#6}]
+\botadjust[#7``{#8}]}
+
+\def\putsquarep<#1>(#2)[#3;#4`#5`#6`#7]{{%
+\setsqparms[#1]%
+\setpos(#2)%
+\settokens[#3]%
+\puthmorphism(\xpos,\ypos)[\tokenc`\tokend`{#7}]{\width}{\arrowtyped}b%
+\advance\ypos by \height
+\puthmorphism(\xpos,\ypos)[\tokena`\tokenb`{#4}]{\width}{\arrowtypea}a%
+\putvmorphism(\xpos,\ypos)[``{#5}]{\height}{\arrowtypeb}l%
+\advance\xpos by \width
+\putvmorphism(\xpos,\ypos)[``{#6}]{\height}{\arrowtypec}r%
+}}
+
+\def\putsquare{\@ifnextchar <{\putsquarep}{\putsquarep%
+ <\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped;\width`\height>}}
+\def\square{\@ifnextchar< {\squarep}{\squarep
+ <\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped;\width`\height>}}
+ % #6
+\def\squarep<#1>[#2`#3`#4`#5;#6`#7`#8`#9]{{% % #2------>#3
+\setsqparms[#1]% % | |
+\xext=\width % | |
+\yext=\height % #7| |#8
+\topadjust[#2`#3`{#6}]% % | |
+\botadjust[#4`#5`{#9}]% % | |
+\leftadjust[#2`#4`{#7}]% % v v
+\rightadjust[#3`#5`{#8}]% % #4------>#5
+\begin{picture}(\xext,\yext)(\xoff,\yoff)% #9
+\putsquarep<\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped;\width`\height>%
+(0,0)[#2`#3`#4`#5;#6`#7`#8`{#9}]%
+\end{picture}%
+}}
+
+\def\putptrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+\advance\ypos by \height
+\puthmorphism(\xpos,\ypos)[#4`#5`{#7}]{\height}{\arrowtypea}a%
+\putvmorphism(\xpos,\ypos)[`#6`{#8}]{\height}{\arrowtypeb}l%
+\advance\xpos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[``{#9}]{\height}{\arrowtypec}r%
+}}
+
+\def\putptriangle{\@ifnextchar <{\putptrianglep}{\putptrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\ptriangle{\@ifnextchar <{\ptrianglep}{\ptrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\ptrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%% #5
+\settriparms[#1]%
+\width=\height % #2----->#3
+\xext=\width % | /
+\yext=\width % | /
+\topadjust[#2`#3`{#5}]% % #6| /#7
+\botadjust[#3``]% % | /
+\leftadjust[#2`#4`{#6}]% % | /
+\rightsladjust[#3`#4`{#7}]% % v v
+\begin{picture}(\xext,\yext)(\xoff,\yoff)% #4
+\putptrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putqtrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+\advance\ypos by\height
+\puthmorphism(\xpos,\ypos)[#4`#5`{#7}]{\height}{\arrowtypea}a%
+\putmorphism(\xpos,\ypos)(1,-1)[``{#8}]{\height}{\arrowtypeb}l%
+\advance\xpos by\height
+\putvmorphism(\xpos,\ypos)[`#6`{#9}]{\height}{\arrowtypec}r%
+}}
+
+\def\putqtriangle{\@ifnextchar <{\putqtrianglep}{\putqtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\qtriangle{\@ifnextchar <{\qtrianglep}{\qtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\qtrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%%
+\settriparms[#1]% #5
+\width=\height % #2----->#3
+\xext=\width % \ |
+\yext=\height % \ |
+\topadjust[#2`#3`{#5}]% % #6\ |#7
+\botadjust[#4``]% % \ |
+\leftsladjust[#2`#4`{#6}]% % \ |
+\rightadjust[#3`#4`{#7}]% % v v
+\begin{picture}(\xext,\yext)(\xoff,\yoff)% #4
+\putqtrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putdtrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+\puthmorphism(\xpos,\ypos)[#5`#6`{#9}]{\height}{\arrowtypec}b%
+\advance\xpos by \height \advance\ypos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[``{#7}]{\height}{\arrowtypea}l%
+\putvmorphism(\xpos,\ypos)[#4``{#8}]{\height}{\arrowtypeb}r%
+}}
+
+\def\putdtriangle{\@ifnextchar <{\putdtrianglep}{\putdtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\dtriangle{\@ifnextchar <{\dtrianglep}{\dtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\dtrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%%
+\settriparms[#1]% #2
+\width=\height % / |
+\xext=\width % / |
+\yext=\height % #5/ |#6
+\topadjust[#2``]% % / |
+\botadjust[#3`#4`{#7}]% % / |
+\leftsladjust[#3`#2`{#5}]% % v v
+\rightadjust[#2`#4`{#6}]% % #3----->#4
+\begin{picture}(\xext,\yext)(\xoff,\yoff)% #7
+\putdtrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putbtrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+\puthmorphism(\xpos,\ypos)[#5`#6`{#9}]{\height}{\arrowtypec}b%
+\advance\ypos by\height
+\putmorphism(\xpos,\ypos)(1,-1)[``{#8}]{\height}{\arrowtypeb}r%
+\putvmorphism(\xpos,\ypos)[#4``{#7}]{\height}{\arrowtypea}l%
+}}
+
+\def\putbtriangle{\@ifnextchar <{\putbtrianglep}{\putbtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\btriangle{\@ifnextchar <{\btrianglep}{\btrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\btrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%%
+\settriparms[#1]% #2
+\width=\height % | \
+\xext=\width % | \
+\yext=\height % #5| \#6
+\topadjust[#2``]% % | \
+\botadjust[#3`#4`{#7}]% % | \
+\leftadjust[#2`#3`{#5}]% % v v
+\rightsladjust[#4`#2`{#6}]% % #3----->#4
+\begin{picture}(\xext,\yext)(\xoff,\yoff)% #7
+\putbtrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putAtrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+{\multiply \height by2
+\puthmorphism(\xpos,\ypos)[#5`#6`{#9}]{\height}{\arrowtypec}b}%
+\advance\xpos by\height \advance\ypos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[#4``{#7}]{\height}{\arrowtypea}l%
+\putmorphism(\xpos,\ypos)(1,-1)[``{#8}]{\height}{\arrowtypeb}r%
+}}
+
+\def\putAtriangle{\@ifnextchar <{\putAtrianglep}{\putAtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\Atriangle{\@ifnextchar <{\Atrianglep}{\Atrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\Atrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%%
+\settriparms[#1]% #2
+\width=\height % / \
+\xext=\width % / \
+\yext=\height % #5/ \#6
+\topadjust[#2``]% % / \
+\botadjust[#3`#4`{#7}]% % / \
+\multiply \xext by2 % % v v
+\leftsladjust[#3`#2`{#5}]% % #3------------>#4
+\rightsladjust[#4`#2`{#6}]% % #7
+\begin{picture}(\xext,\yext)(\xoff,\yoff)%
+\putAtrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putAtrianglepairp<#1>(#2)[#3;#4`#5`#6`#7`#8]{{%
+\settripairparms[#1]%
+\setpos(#2)%
+\settokens[#3]%
+\puthmorphism(\xpos,\ypos)[\tokenb`\tokenc`{#7}]{\height}{\arrowtyped}b%
+\advance\xpos by\height
+\advance\ypos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[\tokena``{#4}]{\height}{\arrowtypea}l%
+\putvmorphism(\xpos,\ypos)[``{#5}]{\height}{\arrowtypeb}m%
+\putmorphism(\xpos,\ypos)(1,-1)[``{#6}]{\height}{\arrowtypec}r%
+}}
+
+\def\putAtrianglepair{\@ifnextchar <{\putAtrianglepairp}{\putAtrianglepairp%
+ <\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped`\arrowtypee;\height>}}
+\def\Atrianglepair{\@ifnextchar <{\Atrianglepairp}{\Atrianglepairp%
+ <\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped`\arrowtypee;\height>}}
+
+\def\Atrianglepairp<#1>[#2;#3`#4`#5`#6`#7]{{%
+\settripairparms[#1]%
+\settokens[#2]%
+\width=\height
+\xext=\width
+\yext=\height
+\topadjust[\tokena``]%
+\vertadjust[\tokenb`\tokenc`{#6}]% % #2a
+\tempcountd=\tempcounta % / | \
+\vertadjust[\tokenc`\tokend`{#7}]% % / | \
+\ifnum\tempcounta<\tempcountd % #3/ #4 \#5
+\tempcounta=\tempcountd\fi % / | \
+\advance \yext by\tempcounta % / | \
+\advance \yoff by-\tempcounta % v v v
+\multiply \xext by2 % % #2b---->#2c---->#2d
+\leftsladjust[\tokenb`\tokena`{#3}]% % #6 #7
+\rightsladjust[\tokend`\tokena`{#5}]%
+\begin{picture}(\xext,\yext)(\xoff,\yoff)%
+\putAtrianglepairp
+<\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped`\arrowtypee;\height>%
+(0,0)[#2;#3`#4`#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putVtrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+\advance\ypos by\height
+{\multiply\height by2
+\puthmorphism(\xpos,\ypos)[#4`#5`{#7}]{\height}{\arrowtypea}a}%
+\putmorphism(\xpos,\ypos)(1,-1)[`#6`{#8}]{\height}{\arrowtypeb}l%
+\advance\xpos by\height
+\advance\xpos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[``{#9}]{\height}{\arrowtypec}r%
+}}
+
+\def\putVtriangle{\@ifnextchar <{\putVtrianglep}{\putVtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\Vtriangle{\@ifnextchar <{\Vtrianglep}{\Vtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\Vtrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%%
+\settriparms[#1]% #5
+\width=\height % #2------------->#3
+\xext=\width % \ /
+\yext=\height % \ /
+\topadjust[#2`#3`{#5}]% % #6\ /#7
+\botadjust[#4``]% % \ /
+\multiply \xext by2 % % \ /
+\leftsladjust[#2`#3`{#6}]% % v v
+\rightsladjust[#3`#4`{#7}]% % #4
+\begin{picture}(\xext,\yext)(\xoff,\yoff)%
+\putVtrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putVtrianglepairp<#1>(#2)[#3;#4`#5`#6`#7`#8]{{
+\settripairparms[#1]%
+\setpos(#2)%
+\settokens[#3]%
+\advance\ypos by\height
+\putmorphism(\xpos,\ypos)(1,-1)[`\tokend`{#6}]{\height}{\arrowtypec}l%
+\puthmorphism(\xpos,\ypos)[\tokena`\tokenb`{#4}]{\height}{\arrowtypea}a%
+\advance\xpos by\height
+\putvmorphism(\xpos,\ypos)[``{#7}]{\height}{\arrowtyped}m%
+\advance\xpos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[``{#8}]{\height}{\arrowtypee}r%
+}}
+
+\def\putVtrianglepair{\@ifnextchar <{\putVtrianglepairp}{\putVtrianglepairp%
+ <\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped`\arrowtypee;\height>}}
+\def\Vtrianglepair{\@ifnextchar <{\Vtrianglepairp}{\Vtrianglepairp%
+ <\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped`\arrowtypee;\height>}}
+
+\def\Vtrianglepairp<#1>[#2;#3`#4`#5`#6`#7]{{%
+\settripairparms[#1]%
+\settokens[#2]% #3 #4
+\xext=\height % #2a---->#2b---->#2c
+\width=\height % \ | /
+\yext=\height % \ | /
+\vertadjust[\tokena`\tokenb`{#4}]% #5\ #6 /#7
+\tempcountd=\tempcounta % \ | /
+\vertadjust[\tokenb`\tokenc`{#5}]% \ | /
+\ifnum\tempcounta<\tempcountd% v v v
+\tempcounta=\tempcountd\fi% #2d
+\advance \yext by\tempcounta
+\botadjust[\tokend``]%
+\multiply \xext by2
+\leftsladjust[\tokena`\tokend`{#6}]%
+\rightsladjust[\tokenc`\tokend`{#7}]%
+\begin{picture}(\xext,\yext)(\xoff,\yoff)%
+\putVtrianglepairp
+<\arrowtypea`\arrowtypeb`\arrowtypec`\arrowtyped`\arrowtypee;\height>%
+(0,0)[#2;#3`#4`#5`#6`{#7}]%
+\end{picture}%
+}}
+
+
+\def\putCtrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+\advance\ypos by\height
+\putmorphism(\xpos,\ypos)(1,-1)[``{#9}]{\height}{\arrowtypec}l%
+\advance\xpos by\height
+\advance\ypos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[#4`#5`{#7}]{\height}{\arrowtypea}l%
+{\multiply\height by 2
+\putvmorphism(\xpos,\ypos)[`#6`{#8}]{\height}{\arrowtypeb}r}%
+}}
+
+\def\putCtriangle{\@ifnextchar <{\putCtrianglep}{\putCtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\Ctriangle{\@ifnextchar <{\Ctrianglep}{\Ctrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\Ctrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%%
+\settriparms[#1]% #2
+\width=\height % / |
+\xext=\width % / |
+\yext=\height % #5/ |
+\multiply \yext by2 % % / |
+\topadjust[#2``]% % / |
+\botadjust[#4``]% % v |
+\sladjust[#3`#2`{#5}]{\width}% % #3 |#6
+\tempcountd=\tempcounta % \ |
+\sladjust[#3`#4`{#7}]{\width}% % \ |
+\ifnum \tempcounta<\tempcountd % #7\ |
+\tempcounta=\tempcountd\fi % \ |
+\advance \xext by\tempcounta % \ |
+\advance \xoff by-\tempcounta % v v
+\rightadjust[#2`#4`{#6}]% % #4
+\begin{picture}(\xext,\yext)(\xoff,\yoff)%
+\putCtrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\putDtrianglep<#1>(#2,#3)[#4`#5`#6;#7`#8`#9]{{%
+\settriparms[#1]%
+\xpos=#2 \ypos=#3
+\advance\xpos by\height \advance\ypos by\height
+\putmorphism(\xpos,\ypos)(-1,-1)[``{#9}]{\height}{\arrowtypec}r%
+\advance\xpos by-\height \advance\ypos by\height
+\putmorphism(\xpos,\ypos)(1,-1)[`#5`{#8}]{\height}{\arrowtypeb}r%
+{\multiply\height by 2
+\putvmorphism(\xpos,\ypos)[#4`#6`{#7}]{\height}{\arrowtypea}l}%
+}}
+
+\def\putDtriangle{\@ifnextchar <{\putDtrianglep}{\putDtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+\def\Dtriangle{\@ifnextchar <{\Dtrianglep}{\Dtrianglep
+ <\arrowtypea`\arrowtypeb`\arrowtypec;\height>}}
+
+\def\Dtrianglep<#1>[#2`#3`#4;#5`#6`#7]{{%%
+\settriparms[#1]% #2
+\width=\height % | \
+\xext=\height % | \
+\yext=\height % | \#6
+\multiply \yext by2 % % | \
+\topadjust[#2``]% % | \
+\botadjust[#4``]% % | v
+\leftadjust[#2`#4`{#5}]% % #5| #3
+\sladjust[#3`#2`{#6}]{\height}% % | / (fix: upper slant label is #6, was #5; cf. art above and \putDtrianglep)
+\tempcountd=\tempcounta % | / (fix: was no-op \tempcountd=\tempcountd; save first slant width, cf. \Ctrianglep)
+\sladjust[#3`#4`{#7}]{\height}% % | /#7
+\ifnum \tempcounta<\tempcountd % | /
+\tempcounta=\tempcountd\fi % | /
+\advance \xext by\tempcounta % v v
+\begin{picture}(\xext,\yext)(\xoff,\yoff)% #4
+\putDtrianglep<\arrowtypea`\arrowtypeb`\arrowtypec;\height>%
+(0,0)[#2`#3`#4;#5`#6`{#7}]%
+\end{picture}%
+}}
+
+\def\setrecparms[#1`#2]{\width=#1 \height=#2}%
+% #4
+% #3b<-------#3a x #3b
+% ^ | |
+% / | |
+% #5/ | |
+% / | |
+% / | |
+% / | |
+% #3c |#6 |#3a x #5
+% \ | |
+% \ | |
+% #8\ | |
+% \ | |
+% \ | |
+% v v v
+% #3d<-------#3a x #3d
+% #8
+
+\def\recursep<#1`#2>[#3;#4`#5`#6`#7`#8]{{%
+\width=#1 \height=#2
+\settokens[#3]
+\settowidth{\tempdimen}{$\tokena$}
+\ifdim\tempdimen=0pt
+ \savebox{\tempboxa}{\hbox{$\tokenb$}}%
+ \savebox{\tempboxb}{\hbox{$\tokend$}}%
+ \savebox{\tempboxc}{\hbox{$#6$}}%
+\else
+ \savebox{\tempboxa}{\hbox{$\hbox{$\tokena$}\times\hbox{$\tokenb$}$}}%
+ \savebox{\tempboxb}{\hbox{$\hbox{$\tokena$}\times\hbox{$\tokend$}$}}%
+ \savebox{\tempboxc}{\hbox{$\hbox{$\tokena$}\times\hbox{$#6$}$}}%
+\fi
+\ypos=\height
+\divide\ypos by 2
+\xpos=\ypos
+\advance\xpos by \width
+\xext=\xpos \yext=\height
+\topadjust[#3`\usebox{\tempboxa}`{#4}]%
+\botadjust[#5`\usebox{\tempboxb}`{#8}]%
+\sladjust[\tokenc`\tokenb`{#5}]{\ypos}%
+\tempcountd=\tempcounta
+\sladjust[\tokenc`\tokend`{#5}]{\ypos}%
+\ifnum \tempcounta<\tempcountd
+\tempcounta=\tempcountd\fi
+\advance \xext by\tempcounta
+\advance \xoff by-\tempcounta
+\rightadjust[\usebox{\tempboxa}`\usebox{\tempboxb}`\usebox{\tempboxc}]%
+\bfig
+\putCtrianglep<-1`1`1;\ypos>(0,0)[`\tokenc`;#5`#6`{#7}]%
+\puthmorphism(\ypos,0)[\tokend`\usebox{\tempboxb}`{#8}]{\width}{-1}b%
+\puthmorphism(\ypos,\height)[\tokenb`\usebox{\tempboxa}`{#4}]{\width}{-1}a%
+\advance\ypos by \width
+\putvmorphism(\ypos,\height)[``\usebox{\tempboxc}]{\height}1r%
+\efig
+}}
+
+\def\recurse{\@ifnextchar <{\recursep}{\recursep<\width`\height>}}
+
+\def\puttwohmorphisms(#1,#2)[#3`#4;#5`#6]#7#8#9{{%
+% 1 and 2 are position, 3 and 4 are the nodes, 5 and 6 the labels,
+% 7 the distance between node centers and 8 & 9 are the arrow types.
+% #5
+% #3 ===========> #4
+% #6
+%
+\puthmorphism(#1,#2)[#3`#4`]{#7}0a
+\ypos=#2
+\advance\ypos by 20
+\puthmorphism(#1,\ypos)[\phantom{#3}`\phantom{#4}`#5]{#7}{#8}a
+\advance\ypos by -40
+\puthmorphism(#1,\ypos)[\phantom{#3}`\phantom{#4}`#6]{#7}{#9}b
+}}
+
+\def\puttwovmorphisms(#1,#2)[#3`#4;#5`#6]#7#8#9{{%
+% 1 and 2 are position, 3 and 4 are the nodes, 5 and 6 the labels,
+% 7 the distance between node centers and 8 & 9 are the arrow types.
+%
+% #3
+% ||
+% ||
+% #5 || #6
+% ||
+% ||
+% vv
+% #4
+%
+\putvmorphism(#1,#2)[#3`#4`]{#7}0a
+\xpos=#1
+\advance\xpos by -20
+\putvmorphism(\xpos,#2)[\phantom{#3}`\phantom{#4}`#5]{#7}{#8}l
+\advance\xpos by 40
+\putvmorphism(\xpos,#2)[\phantom{#3}`\phantom{#4}`#6]{#7}{#9}r
+}}
+
+\def\puthcoequalizer(#1)[#2`#3`#4;#5`#6`#7]#8#9{{%
+% #1 is (\xpos,\ypos), the next 6 are the nodes and arrow labels
+% #8 is the distance between each pair of nodes and #9 is the pos of #7
+% either a (above) or b (below)
+% #5 #7
+% #2 ===========> #3 --------> #4
+% #6
+%
+\setpos(#1)%
+\puttwohmorphisms(\xpos,\ypos)[#2`#3;#5`#6]{#8}11%
+\advance\xpos by #8
+\puthmorphism(\xpos,\ypos)[\phantom{#3}`#4`#7]{#8}1{#9}
+}}
+
+\def\putvcoequalizer(#1)[#2`#3`#4;#5`#6`#7]#8#9{{%
+% #1 is (\xpos,\ypos), the next 6 are the nodes and arrow labels
+% #8 is the distance between each pair of nodes and #9 is the pos of #7
+% either l (left) or r (right)
+%
+% #2
+% | |
+% | |
+% #5 | | #6
+% | |
+% | |
+% v v
+% #3
+% |
+% |
+% #7 |
+% |
+% v
+% #4
+%
+\setpos(#1)%
+\puttwovmorphisms(\xpos,\ypos)[#2`#3;#5`#6]{#8}11%
+\advance\ypos by -#8
+\putvmorphism(\xpos,\ypos)[\phantom{#3}`#4`#7]{#8}1{#9}
+}}
+
+\def\putthreehmorphisms(#1)[#2`#3;#4`#5`#6]#7(#8)#9{{%
+% Use: \putthreehmorphisms(xpos,ypos)[lnode`rnode;toplabel`midlabel%
+% botlabel]{distance}(toparrowtype,midarrowtype,botarrowtype){position}
+\setpos(#1) \settypes(#8)
+\if a#9 %
+ \vertsize{\tempcounta}{#5}%
+ \vertsize{\tempcountb}{#6}%
+ \ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb \fi
+\else
+ \vertsize{\tempcounta}{#4}%
+ \vertsize{\tempcountb}{#5}%
+ \ifnum \tempcounta<\tempcountb \tempcounta=\tempcountb \fi
+\fi
+\advance \tempcounta by 60
+\puthmorphism(\xpos,\ypos)[#2`#3`#5]{#7}{\arrowtypeb}{#9}
+\advance\ypos by \tempcounta
+\puthmorphism(\xpos,\ypos)[\phantom{#2}`\phantom{#3}`#4]{#7}{\arrowtypea}{#9}
+\advance\ypos by -\tempcounta \advance\ypos by -\tempcounta
+\puthmorphism(\xpos,\ypos)[\phantom{#2}`\phantom{#3}`#6]{#7}{\arrowtypec}{#9}
+}}
+
+%\def\putarc(#1,#2)[#3`#4`#5]#6#7#8{{%
+%\xpos #1
+%\ypos #2
+%\width #6
+%\arrowlength #6
+%\putbox(\xpos,\ypos){#3\vphantom{#4}}%
+%{\advance \xpos by\arrowlength
+%\putbox(\xpos,\ypos){\vphantom{#3}#4}}%
+%\horsize{\tempcounta}{#3}%
+%\horsize{\tempcountb}{#4}%
+%\divide \tempcounta by2
+%\divide \tempcountb by2
+%\advance \tempcounta by30
+%\advance \tempcountb by30
+%\advance \xpos by\tempcounta
+%\advance \arrowlength by-\tempcounta
+%\advance \arrowlength by-\tempcountb
+%\halflength=\arrowlength \divide\halflength by 2
+%\divide\arrowlength by 5
+%\put(\xpos,\ypos){\bezier{\arrowlength}(0,0)(50,50)(\halflength,50)}
+%\ifnum #7=-1 \put(\xpos,\ypos){\vector(-3,-2)0} \fi
+%\advance\xpos by \halflength
+%\put(\xpos,\ypos){\xpos=\halflength \advance\xpos by -50
+% \bezier{\arrowlength}(0,50)(\xpos,50)(\halflength,0)}
+%\ifnum #7=1 {\advance \xpos by
+% \halflength \put(\xpos,\ypos){\vector(3,-2)0}} \fi
+%\advance\ypos by 50
+%\vertsize{\tempcounta}{#5}%
+%\divide\tempcounta by2
+%\advance \tempcounta by20
+%\if a#8 %
+% \advance \ypos by\tempcounta
+% \putbox(\xpos,\ypos){#5}%
+%\else
+% \advance \ypos by-\tempcounta
+% \putbox(\xpos,\ypos){#5}%
+%\fi
+%}}
+
diff --git a/books/dissdef.sty b/books/dissdef.sty
new file mode 100644
index 0000000..ca3e2da
--- /dev/null
+++ b/books/dissdef.sty
@@ -0,0 +1,434 @@
+% Style file for my dissertation
+% contains all general macros
+
+
+
+\makeatletter
+
+% Symbole aus LLNCS.STY
+\def\squareforqed{\hbox{\rlap{$\sqcap$}$\sqcup$}}
+\def\qed{\ifmmode\squareforqed\else{\unskip\nobreak\hfil
+\penalty50\hskip1em\null\nobreak\hfil\squareforqed
+\parfillskip=0pt\finalhyphendemerits=0\endgraf}\fi}
+
+\def\getsto{\mathrel{\mathchoice {\vcenter{\offinterlineskip
+\halign{\hfil
+$\displaystyle##$\hfil\cr\gets\cr\to\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr\gets
+\cr\to\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr\gets
+\cr\to\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+\gets\cr\to\cr}}}}}
+\def\lid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil
+$\displaystyle##$\hfil\cr<\cr\noalign{\vskip1.2pt}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr<\cr
+\noalign{\vskip1.2pt}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr<\cr
+\noalign{\vskip1pt}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+<\cr
+\noalign{\vskip0.9pt}=\cr}}}}}
+\def\gid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil
+$\displaystyle##$\hfil\cr>\cr\noalign{\vskip1.2pt}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr>\cr
+\noalign{\vskip1.2pt}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr>\cr
+\noalign{\vskip1pt}=\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+>\cr
+\noalign{\vskip0.9pt}=\cr}}}}}
+\def\grole{\mathrel{\mathchoice {\vcenter{\offinterlineskip
+\halign{\hfil
+$\displaystyle##$\hfil\cr>\cr\noalign{\vskip-1pt}<\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr
+>\cr\noalign{\vskip-1pt}<\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr
+>\cr\noalign{\vskip-0.8pt}<\cr}}}
+{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr
+>\cr\noalign{\vskip-0.3pt}<\cr}}}}}
+\def\bbbr{{\rm I\!R}} %reelle Zahlen
+\def\bbbm{{\rm I\!M}}
+\def\bbbn{{\rm I\!N}} %natuerliche Zahlen
+\def\bbbf{{\rm I\!F}}
+\def\bbbh{{\rm I\!H}}
+\def\bbbk{{\rm I\!K}}
+\def\bbbp{{\rm I\!P}}
+\def\bbbone{{\mathchoice {\rm 1\mskip-4mu l} {\rm 1\mskip-4mu l}
+{\rm 1\mskip-4.5mu l} {\rm 1\mskip-5mu l}}}
+\def\bbbc{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm C$}\hbox{\hbox
+to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle\rm C$}\hbox{\hbox
+to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle\rm C$}\hbox{\hbox
+to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm C$}\hbox{\hbox
+to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}}}}
+\def\bbbq{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm
+Q$}\hbox{\raise
+0.10\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.85\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle\rm Q$}\hbox{\raise
+0.10\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.85\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle\rm Q$}\hbox{\raise
+0.10\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.7\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm Q$}\hbox{\raise
+0.10\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.7\ht0\hss}\box0}}}}
+\def\bbbt{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm
+T$}\hbox{\hbox to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle\rm T$}\hbox{\hbox
+to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle\rm T$}\hbox{\hbox
+to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm T$}\hbox{\hbox
+to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}}}}
+\def\bbbs{{\mathchoice
+{\setbox0=\hbox{$\displaystyle \rm S$}\hbox{\raise0.5\ht0\hbox
+to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\hbox
+to0pt{\kern0.55\wd0\vrule height0.5\ht0\hss}\box0}}
+{\setbox0=\hbox{$\textstyle \rm S$}\hbox{\raise0.5\ht0\hbox
+to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\hbox
+to0pt{\kern0.55\wd0\vrule height0.5\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptstyle \rm S$}\hbox{\raise0.5\ht0\hbox
+to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\raise0.05\ht0\hbox
+to0pt{\kern0.5\wd0\vrule height0.45\ht0\hss}\box0}}
+{\setbox0=\hbox{$\scriptscriptstyle\rm S$}\hbox{\raise0.5\ht0\hbox
+to0pt{\kern0.4\wd0\vrule height0.45\ht0\hss}\raise0.05\ht0\hbox
+to0pt{\kern0.55\wd0\vrule height0.45\ht0\hss}\box0}}}}
+\def\bbbz{{\mathchoice {\hbox{$\sf\textstyle Z\kern-0.4em Z$}}
+{\hbox{$\sf\textstyle Z\kern-0.4em Z$}}
+{\hbox{$\sf\scriptstyle Z\kern-0.3em Z$}}
+{\hbox{$\sf\scriptscriptstyle Z\kern-0.2em Z$}}}}
+\def\ts{\thinspace}
+
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%Definitionen aus mathsing.sty
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% New environments
+%
+% lemma, proposition, theorem, corollary (\bf,\it) (numbered)
+% exercise, problem, solution, definition (\bf,\rm)
+% 27.3.91 binding: example, note and question changed to (\bf, \rm)
+%
+% lemma*, proposition*, theorem*, corollary* (\bf,\it) (unnumbered)
+% exercise*, problem*, solution*, definition* (\bf,\rm)
+% example*, note*, question* (\it,\rm)
+%
+% remark, proof (\it,\rm) (unnumbered)
+%
+% usage: \begin{lemma} or \begin{lemma}[COMMENT]
+% ... ...
+% \end{lemma} \end{lemma}
+%
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% currently one counter is used for all theorem like environments
+
+\newcounter{lemmacount}[chapter]
+\renewcommand{\thelemmacount}{\thechapter.\arabic{lemmacount}}
+
+
+%
+% short form for defininng new theorem like environments:
+% \newthe{NAME}{NAME*}{TITLE}{COUNTER}{FONT1}{FONT2}
+%
+\def\@@begthe#1{\@ifnextchar[{\@optbegthe#1}{\@begthe#1}}
+%27.3.91 binding: dot deleted
+\def\@begthe#1{. #1} %changed
+%\def\@begthe#1{.\ #1} %changed
+%\def\@begthe#1{ #1}
+\def\@optbegthe#1[#2]{ {#2}.\ #1}%changed AW
+%\def\@optbegthe#1[#2]{ {#2} #1}
+\newcommand{\newthe}[6]{
+ \def\nlni{\par\ifvmode\removelastskip\fi\vskip\baselineskip\noindent}
+ \def\xxxend{\endgroup\vskip\baselineskip}
+ \newenvironment{#1}{\nlni\begingroup\refstepcounter{#4}#5#3
+%changed 91/10/7 fuh:\arabic{chapter}.\arabic{#4}\@@begthe{#6}}{\xxxend}
+ \thechapter.\arabic{#4}\@@begthe{#6}}{\xxxend}
+ \newenvironment{#2}{\nlni\begingroup#5#3\@@begthe{#6}}{\xxxend}}
+
+
+% Lemma, Proposition, Theorem, Corollary (\bf,\it)
+
+%\newthe{lemma}{lemma*}{Lemma}{lemmacount}{\bf}{\it}
+%\newthe{proposition}{proposition*}{Proposition}{lemmacount}{\bf}{\it}
+%\newthe{theorem}{theorem*}{Theorem}{lemmacount}{\bf}{\it}
+%\newthe{corollary}{corollary*}{Corollary}{lemmacount}{\bf}{\it}
+
+
+% Exercise, Problem, Solution, Definition (\bf,\rm)
+
+\newthe{exercise}{exercise*}{exercise}{lemmacount}{\bf}{\it}
+\newthe{problem}{problem*}{Problem}{lemmacount}{\bf}{\it}
+\newthe{solution}{solution*}{Solution}{lemmacount}{\bf}{\it}
+%\newthe{definition}{definition*}{Definition}{lemmacount}{\bf}{\it}
+
+
+% Example, Note, Question (\bf,\rm)
+
+%\newthe{example}{example*}{Example}{lemmacount}{\bf}{\rm}
+\newthe{note}{note*}{Note}{lemmacount}{\bf}{\rm}
+\newthe{question}{question*}{Question}{lemmacount}{\bf}{\rm}
+
+% Remark, Proof
+
+%\newenvironment{remark}{\nlni\begingroup\it Remark. \rm}{
+% \endgroup\vskip\baselineskip}
+%\newenvironment{proof}{\nlni\begingroup\it Proof. \rm}{
+% \endgroup\vskip\baselineskip}
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% qed
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\def\squareforqed{\hbox{\rlap{$\sqcap$}$\sqcup$}}
+\def\qed{\ifmmode\squareforqed\else{\unskip\nobreak\hfil
+\penalty50\hskip1em\null\nobreak\hfil\squareforqed
+\parfillskip=0pt\finalhyphendemerits=0\endgraf}\fi}
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% \eqnarticle simple equation numbers without chapter number
+% \eqnbook structured equation numbers (default)
+% changed by binding 5.2.91: changed to \numberlikearticle and
+% \numberlikebook, changing numbering of
+% figures and tables also.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\def\numberlikearticle{\global\def\theequation{\arabic{equation}}
+\global\def\thetable{\arabic{table}}
+\global\def\thefigure{\arabic{figure}}}
+\def\numberlikebook{\global\def\theequation{\thechapter.\arabic{equation}}
+\global\def\thetable{\thechapter.\arabic{table}}
+\global\def\thefigure{\thechapter.\arabic{figure}}}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% Redeclaration of \makeatletter; no @-expressions may be used from now on
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Eigene Definitionen
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\newcommand{\deflabel}[1]{\bf #1\hfill}%
+\newenvironment{deflist}[1]%
+{\begin{list}{}%
+{\settowidth{\labelwidth}{\bf #1}%
+\setlength{\leftmargin}{\labelwidth}%
+\addtolength{\leftmargin}{\labelsep}%
+\renewcommand{\makelabel}{\deflabel}}}%
+{\end{list}}%
+
+
+
+\newcounter{assumptcount}[chapter]
+\renewcommand{\theassumptcount}{\thechapter.\arabic{assumptcount}}
+
+\newthe{assumption}{assumption*}{Assumption}{assumptcount}{\bf}{\sl}
+%\documentstyle{mathsing}
+%\newthe{conjecture}{conjecture*}{Conjecture}{lemmacount}{\bf}{\it}
+\def\frak{\rm}
+ \numberlikebook
+% \numberlikearticle
+
+%TeX book, p. 386
+\def\pmb#1{\setbox0=\hbox{#1}%
+ \kern-.025em\copy0\kern-\wd0
+ \kern.05em\copy0\kern-\wd0
+ \kern-.025em\raise.0433em\box0 }
+
+\def\aldesbegbr{\pmb{$\bigl\{$}}
+\def\aldesendbr{\pmb{$\bigr\}$}}
+
+
+
+%index Definitionen
+\newcommand{\ii}[1]{{\it#1}}
+\newcommand{\nn}[1]{#1n}
+
+
+%%%%%%
+\def\theindex{ \cleardoublepage
+ \small
+ \columnseprule \z@
+ \columnsep=0.84cm
+ \twocolumn[\@makeschapterhead{Index}]
+ \addcontentsline{toc}{chapter}{Index}
+ \@mkboth{Index}{Index}
+ \thispagestyle{plain}\parindent\z@
+ \parskip\z@ plus .3pt\relax\let\item\@idxitem}
+\def\@idxitem{\par\hangindent 15pt}
+\def\subitem{\par\hangindent 15pt -- }
+\def\endtheindex{\clearpage}
+\def\indexspace{\par \vskip 10pt plus 5pt minus 3pt\relax}
+%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Diverse objects
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\def\NN{\bbbn}
+\def\ZZ{\bbbz}
+\def\RR{\bbbr}
+\def\CC{\bbbc}
+\def\QQ{\bbbq}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Category Theory
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%\newcommand{\mod}[1]{\underline{#1}}
+\newcommand{\cat}[1]{{\cal #1}}
+\newcommand{\obj}[1]{{\rm Obj_{{\cal #1}}}}
+\newcommand{\mor}[2]{{\rm Mor_{{\cal #1}}(#2)}}
+
+\newcommand{\catofsets}{\mbox{\sf SET}}
+\newcommand{\typesetinterpr}[1]{{\bf T}_{#1}}
+\newcommand{\opinterprset}[1]{{\bf O}_{#1}}
+\newcommand{\rtypeass}[1]{{\Gamma}_{#1}}
+\newcommand{\rtypeasop}{\rtypeass{{\rm op}}}
+\newcommand{\flatsetn}{{{\bf F}_n}}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Diverse mathematical notations
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\newcommand{\join}{\sqcup}
+\newcommand{\meet}{\sqcap}
+
+\def\com{\mathop{\rm com}}
+\def\mub{\mathop{\rm mub}}
+\def\CSMUB{\mathop{\rm CSMUB}}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%% TYPING
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\newcommand{\subtype}{\unlhd}
+\newcommand{\subtypepoly}{\widetilde{\unlhd}}
+\newcommand{\subtypeaa}{\widetilde{\unlhd'}}
+\newcommand{\subtypeab}{\widetilde{\unlhd''}}
+\newcommand{\typeiso}{\mbox{$\unrhd\!\unlhd$}}
+%\newcommand{\subtimplied}{\leadsto}
+
+
+\def\tf#1{{\tt #1}}
+\def\cf#1{{\sf #1}}
+\def\ff#1{{\sl #1}}
+
+
+\newcommand{\lsb}{\langle\!\langle}
+\newcommand{\rsb}{\rangle\!\rangle}
+\newcommand{\sem}[1]{{\lsb #1 \rsb}}
+
+\newenvironment{progverb}{\begin{footnotesize}}{\end{footnotesize}}
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Index
+%%%%%%%%%%%%%%%%%%%%%%%%%%
+\def\seeterm{see}
+\def\alsoterm{see also}
+
+
+
+
+% This is PARSKIP.STY by H.Partl, TU Wien, as of 19 Jan 1989.
+% Document Style Option to be used with any style and with any size.
+% It produces the following Paragraph Layout:
+% Zero Parindent and non-zero Parskip. The stretchable glue in \parskip
+% helps LaTeX in finding the best place for page breaks.
+
+\parskip=0.5\baselineskip \advance\parskip by 0pt plus 2pt
+\parindent=\z@
+
+% To accompany this, the vertical spacing in the list environments is changed
+% to use the same as \parskip in all relevant places (for normalsize only):
+% \parsep = \parskip
+% \itemsep = \z@ % add nothing to \parskip between items
+% \topsep = \z@ % add nothing to \parskip before first item
+
+\def\@listI{\leftmargin\leftmargini
+ \topsep\z@ \parsep\parskip \itemsep\z@}
+\let\@listi\@listI
+\@listi
+
+\def\@listii{\leftmargin\leftmarginii
+ \labelwidth\leftmarginii\advance\labelwidth-\labelsep
+ \topsep\z@ \parsep\parskip \itemsep\z@}
+
+\def\@listiii{\leftmargin\leftmarginiii
+ \labelwidth\leftmarginiii\advance\labelwidth-\labelsep
+ \topsep\z@ \parsep\parskip \itemsep\z@}
+
+% Note that listiv, listv and listvi don't change vertical parameters.
+
+
+%New footnote environment
+% form jnsl.sty
+% but with 1.5em indent instead of 1em
+\long\def\@makefntext#1{\@setpar{\@@par\@tempdima \hsize
+ \advance\@tempdima-1.5em\parshape \@ne 1.5em\@tempdima}\par
+ \parindent 1.5em\noindent \hbox to \z@{\hss$^{\@thefnmark}$\ }#1}
+
+
+
+\makeatother
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Papersize
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%\textwidth 15.5cm
+%\topmargin 0.5cm
+%\evensidemargin 0.0cm
+%\oddsidemargin 0.5cm
+%\textheight 22cm
+%%%%%%%%%%%%%%%%%%%%%%%
+\textwidth 15.0cm
+\topmargin -0.9cm
+\evensidemargin 0.5cm
+\oddsidemargin 0.5cm
+\textheight 21.0cm
+
+
+
diff --git a/changelog b/changelog
index 436f766..3afa6a1 100644
--- a/changelog
+++ b/changelog
@@ -1,3 +1,10 @@
+20170702 tpd src/axiom-website/patches.html 20170702.01.tpd.patch
+20170702 tpd books/bookheader.tex add Weber thesis
+20170702 tpd books/bookvol10.1 add Weber thesis
+20170702 tpd books/bookvolbib add Weber thesis
+20170702 tpd books/bookvolbug add Weber thesis
+20170702 tpd books/dissdef.sty add Weber thesis
+20170702 tpd books/catmac.sty add Weber thesis
20170627 tpd src/axiom-website/patches.html 20170627.01.tpd.patch
20170627 tpd readme add June Huh quote
20170612 tpd src/axiom-website/patches.html 20170612.01.tpd.patch
diff --git a/patch b/patch
index eb560f3..b68b389 100644
--- a/patch
+++ b/patch
@@ -1,3 +1,3010 @@
-books/bookvol10.1 add Tutorial on Quantifier Elimination by Hong
+\index{Wolfram, Stephen}
+\begin{chunk}{axiom.bib}
+@book{Wolf91,
+ author = "Wolfram, Stephen",
+ title = "Mathematica: A System for Doing Mathematics by Computer",
+ publisher = "Addison-Wesley",
+ isbn = "978-0201515022",
+ year = "1991"
+}
-Goal: Axiom Literate Programming
+\end{chunk}
+
+\index{Char, Bruce}
+\index{Geddes, Keith O.}
+\index{Gonnet, Gaston H.}
+\index{Leong, Benton}
+\index{Monagan, Michael B.}
+\index{Watt, Stephen M.}
+\begin{chunk}{axiom.bib}
+@book{Char91,
+ author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
+ Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
+ title = "Maple V Language Reference Manual",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "978-0-387-94124-0"
+}
+
+\end{chunk}
+
+\index{Jenks, Richard D.}
+\begin{chunk}{axiom.bib}
+@techreport{Jenk70,
+ author = "Jenks, Richard D.",
+ title = "META/LISP: An interactive translator writing system",
+ type = "Research Report",
+ number = "RC 2968",
+ institution = "IBM Research",
+ year = "1970",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Smolka, Gert}
+\begin{chunk}{axiom.bib}
+@article{Smol88,
+ author = "Smolka, Gert",
+ title = "Logic Programming with Polymorphically Order-Sorted Types",
+ journal = "Lecture Notes in Computer Science",
+ volume = "343",
+ pages = "53-70",
+ year = "1988",
+ abstract =
+ "This paper presents the foundations for relational logic programming
+ with polymorphically order-sorted data types. This type discipline
+ combines the notion of parametric polymorphism [Milner 78], which has
+ been developed for higher-order functional programming [Harper et al. 86],
+ with the notion of order-sorted typing [Goguen 78, Smolka et al. 87],
+ which has been developed for equational first-order specification
+ and programming [Futatsugi et al. 85]. Both notions are important
+ for practical reasons. With parametric polymorphism one avoids the
+ need for redefining lists and other parametric data types for every
+ type they are used with. Subsorts not only provide for more natural
+ type specifications, but also yield more computational power:
+ variables can be constrained to sorts rather than to single values and
+ typed unification computes directly with sort constraints, thus
+ reducing the need for expensive backtracking.",
+ paper = "Smol88.pdf"
+}
+
+\end{chunk}
+
+\index{Fruehwirth, Thom}
+\index{Shapiro, Ehud}
+\index{Vardi, Moshe Y.}
+\index{Yardeni, Eyal}
+\begin{chunk}{axiom.bib}
+@inproceedings{Frue91,
+ author = "Fruehwirth, Thom and Shapiro, Ehud and Vardi, Moshe Y. and
+ Yardeni, Eyal",
+ title = "Logic programs as types for logic programs",
+ booktitle = "Proc. Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
+ publisher = "IEEE",
+ pages = "300-309",
+ year = "1991",
+ abstract =
+ "Type checking can be extremely useful to the program development process.
+ Of particular interest are descriptive type systems, which let the
+ programmer write programs without having to define or mention types.
+ We consider here optimistic type systems for logic programs. In such
+ systems types are conservative approximations to the success set of the
+ program predicates. We propose the use of logic programs to describe
+ types. We argue that this approach unifies the denotational and
+ operational approaches to descriptive type systems and is simpler
+ and more natural than previous approaches. We focus on the use of
+ unary-predicate programs to describe types. We identify a proper class
+ of unary-predicate programs and show that it is expressive enough to
+ express several notions of types. We use an analogy with 2-way automata
+ and a correspondence with alternating algorithms to obtain a complexity
+ characterization of type inference and type checking. This
+ characterization was facilitated by the use of logic programs to
+ represent types.",
+ paper = "Frue91.pdf"
+}
+
+\end{chunk}
+
+\index{Kifer, Michael}
+\index{Wu, James}
+\begin{chunk}{axiom.bib}
+@inproceedings{Kife91,
+ author = "Kifer, Michael and Wu, James",
+ title = "A First-order Theory of Types and Polymorphism in Logic
+ Programming",
+ booktitle = "Proc Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
+ year = "1991",
+ pages = "310-321",
+ abstract =
+ "A logic called typed predicate calculus (TPC) that gives declarative
+ meaning to logic programs with type declarations and type inference is
+ introduced. The proper interaction between parametric and inclusion
+ varieties of polymorphism is achieved through a construct called type
+ dependency, which is analogous to implication types but yields more
+ natural and succinct specifications. Unlike other proposals where
+ typing has extra-logical status, in TPC the notion of type-correctness
+ has precise model-theoretic meaning that is independent of any
+ specific type-checking or type-inference procedure. Moreover, many
+ different approaches to typing that were proposed in the past can be
+ studied and compared within the framework of TPC. Another novel
+ feature of TPC is its reflexivity with respect to type declarations;
+ in TPC, these declarations can be queried the same way as any other
+ data. Type reflexivity is useful for browsing knowledge bases and,
+ potentially, for debugging logic programs.",
+ paper = "Kife91.pdf"
+}
+
+\end{chunk}
+
+\index{Butler, Greg}
+\index{Cannon, John}
+\begin{chunk}{axiom.bib}
+@inproceedings{Butl90,
+ author = "Butler, Greg and Cannon, John",
+ title = "The Design of Cayley -- A Language for Modern Algebra",
+ booktitle = "DISCO 1990",
+ year = "1990",
+ pages = "10-19",
+ abstract =
+ "Established practice in the domain of modern algebra has shaped the
+ design of Cayley. The design has also been responsive to the needs of
+ its users. The requirements of the users include consistency with
+ common mathematical notation; appropriate data types such as sets,
+ sequences, mappings, algebraic structures and elements; efficiency;
+ extensibility; power of in-built functions and procedures for known
+ algorithms; and access to common examples of algebraic structures. We
+ discuss these influences on the design of Cayley's user language.",
+ paper = "Butl90.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Kaliszyk, Cezary}
+\index{Wiedijk, Freek}
+\begin{chunk}{axiom.bib}
+@article{Kali07,
+ author = "Kaliszyk, Cezary and Wiedijk, Freek",
+ title = "Certified Computer Algebra on Top of an Interactive Theorem
+ Prover",
+ journal = "LNAI",
+ volume = "4573",
+ pages = "94-105",
+ year = "2007",
+ abstract =
+ "We present a prototype of a computer algebra system that is built on
+ top of a proof assistant, HOL Light. This architecture guarantees
+ that one can be certain that the system will make no mistakes. All
+ expressions in the system will have precise semantics, and the proof
+ assistant will check the correctness of all simplifications according
+ to this semantics. The system actually {\sl proves} each simplification
+ performed by the computer algebra system.
+
+ Although our system is built on top of a proof assistant, we designed
+ the user interface to be very close in spirit to the interface of
+ systems like Maple and Mathematica. The system, therefore, allows the
+ user to easily probe the underlying automation of the proof assistant
+ for strengths and weaknesses with respect to the automation of
+ mainstream computer algebra systems. The system that we present is a
+ prototype, but can be straightforwardly scaled up to a practical
+ computer algebra system",
+ paper = "Kali07.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Chapman, Peter}
+\index{McKinna, James}
+\index{Urban, Christian}
+\begin{chunk}{axiom.bib}
+@article{Chap08,
+ author = "Chapman, Peter and McKinna, James and Urban, Christian",
+ title = "Mechanising a Proof of Craig's Interpolation Theorem for
+ Intuitionistic Logic in Nominal Isabelle",
+ journal = "LNAI",
+ volume = "5144",
+ year = "2008",
+ pages = "38-52",
+ abstract =
+ "Craig's Interpolation Theorem is an important meta-theoretical result
+ for several logics. Here we describe a formalisation of the result for
+ first-order intuitionistic logic without function symbols or equality,
+ with the intention of giving insight into how other such results in
+ proof theory might be mechanically verified, notably cut-admissibility.
+ We use the package {\sl Nominal Isabelle}, which easily deals with the
+ binding issues in the quantifier cases of the proof",
+}
+
+\end{chunk}
+
+\index{Lobachev, Oleg}
+\index{Loogen, Rita}
+\begin{chunk}{axiom.bib}
+@article{Loba08,
+ author = "Lobachev, Oleg and Loogen, Rita",
+ title = "Towards an Implementation of a Computer Algebra System in a
+ Functional Language",
+ journal = "LNAI",
+ year = "2008",
+ pages = "141-154",
+ abstract =
+ "This paper discusses the pros and cons of using a functional language
+ for implementing a computer algebra system. The contributions of the
+ paper are twofold. Firstly, we discuss some language-centered design
+ aspects of a computer algebra system -- the ``language unity''
+ concept. Secondly, we provide an implementation of a fast polynomial
+ multiplication algorithm, which is one of the core elements of a
+ computer algebra system. The goal of the paper is to test the
+ feasibility of an implementation of (some elements of) a computer
+ algebra system in a modern functional language.",
+ paper = "Loba08.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Butler, Greg}
+\begin{chunk}{axiom.bib}
+@article{Butl96,
+ author = "Butler, Greg",
+ title = "Software Architectures for Computer Algebra: A Case Study",
+ journal = "DISCO 96",
+ year = "1996",
+ pages = "277-285",
+ abstract =
+ "The architectures of the existing computer algebra systems have not
+ been discussed sufficiently in the literature. Instead, the focus
+ has been on the design of the related programming language, or the
+ design of a few key data structures.
+
+ We address this deficiency with a case study of the architecture of
+ CAYLEY. Our aim is twofold: to capture this knowledge before the total
+ passing of a system now made obsolete by MAGMA; and to encourage others
+ to describe the architecture of the computer algebra systems with which
+ they are familiar.
+
+ The long-term goal is a better understanding of how to construct
+ computer algebra systems in the future.",
+ paper = "Butl96.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Mosses, Peter D.}
+\begin{chunk}{axiom.bib}
+@article{Moss93,
+ author = "Mosses, Peter D.",
+ title = "The Use of Sorts in Algebraic Specifications",
+ journal = "Recent Trends in Data Type Specification",
+ year = "1993",
+ pages = "66-91",
+ abstract =
+ "Algebraic specification frameworks exploit a variety of sort
+ disciplines. The treatment of sorts has a considerable influence on
+ the ease with which such features as partiality and polymorphism can be
+ specified. This survey gives an accessible overview of various
+ frameworks, focusing on their sort disciplines and assessing their
+ strengths and weaknesses for practical applications. Familiarity with
+ the basic notions of algebraic specification is assumed.",
+ paper = "Moss93.pdf"
+}
+
+\end{chunk}
+
+\index{Kredel, Heinz}
+\begin{chunk}{axiom.bib}
+@article{Kred08,
+ author = "Kredel, Heinz",
+ title = "Unique Factorization Domains in the Java Computer Algebra System",
+ journal = "Automated Deduction in Geometry",
+ year = "2008",
+ pages = "86-115",
+ abstract =
+ "This paper describes the implementation of recursive algorithms in
+ unique factorization domains, namely multivariate polynomial greatest
+ common divisors (gcd) and factorization into irreducible parts in the
+ Java computer algebra library (JAS). The implementation of gcds,
+ resultants and factorization is part of the essential building blocks
+ for any computation in algebraic geometry, in particular in automated
+ deduction in geometry. There are various implementations of these
+ algorithms in procedural programming languages. Our aim is an
+ implementation in a modern object oriented programming language with
+ generic data types, as it is provided by Java programming language. We
+ exemplify that the type design and implementation of JAS is suitable
+ for the implementation of several greatest common divisor algorithms
+ and factorization of multivariate polynomials. Due to the design we
+ can employ this package in very general settings not commonly seen in
+ other computer algebra systems. As for example, in the coefficient
+ arithmetic for advanced Gröbner basis computations like in polynomial
+ rings over rational function fields or (finite, commutative) regular
+ rings. The new package provides factory methods for the selection of
+ one of the several implementations for non experts. Further we
+ introduce a parallel proxy for gcd implementations which runs
+ different implementations concurrently.",
+ paper = "Kred08.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Harrison, John}
+\index{Thery, Laurent}
+\begin{chunk}{axiom.bib}
+@article{Harr94,
+ author = "Harrison, John and Thery, Laurent",
+ title = "Extending the HOL theorem prover with a computer algebra
+ system to reason about the reals",
+ journal = "Lecture Notes in Computer Science",
+ volume = "780",
+ pages = "174-184",
+ year = "1994",
+ abstract =
+ "In this paper we describe an environment for reasoning about the reals
+ which combines the rigour of a theorem prover with the power of a
+ computer algebra system.",
+ paper = "Harr94.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Medina-Bulo, I.}
+\index{Palomo-Lozano, F.}
+\index{Alonso-Jimenez, J.A.}
+\index{Ruiz-Reina, J.L.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Medi04,
+ author = "Medina-Bulo, I. and Palomo-Lozano, F. and Alonso-Jimenez, J.A.
+ and Ruiz-Reina, J.L.",
+ title = "Verified Computer Algebra in ACL2",
+ booktitle = "ISSAC 04, LNCS Volume 3249",
+ year = "2004",
+ pages = "171-184",
+ abstract =
+ "In this paper, we present the formal verification of a COMMON LISP
+ implementation of Buchberger's algorithm for computing Groebner bases
+ of polynomial ideals. This work is carried out in the ACL2 system and
+ shows how verified Computer Algebra can be achieved in an executable
+ logic.",
+ paper = "Medi04.pdf"
+}
+
+\end{chunk}
+
+\index{Davenport, James H.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Dave08,
+ author = "Davenport, James H.",
+ title = "Effective Set Membership in Computer Algebra and Beyond",
+ booktitle = "Int. Conf. on Intelligent Computer Mathematics",
+ pages = "266-269",
+ year = "2008",
+ journal = "LNCS",
+ volume = "5144",
+ abstract =
+ "In previous work we showed the importance of distinguishing ``I know
+ $X\ne Y$''. In this paper we look at
+ effective set membership, starting with Groebner bases, where the
+ issues are well-expressed in algebra systems, and going on to integration
+ and other questions of 'computer calculus'.
+
+ In particular, we claim that a better recognition of the role of set
+ membership would clarify some features of computer algebra systems,
+ such as 'what does an integral mean as output'.",
+ paper = "Dave08.pdf"
+}
+
+\end{chunk}
+
+\index{Ueberberg, Johannes}
+\begin{chunk}{axiom.bib}
+@inproceedings{Uebe94,
+ author = "Ueberberg, Johannes",
+ title = "Interactive theorem proving and computer algebra",
+ booktitle = "AISMC 94",
+ year = "1994",
+ pages = "1-9",
+ paper = "Uebe94.pdf"
+}
+
+\end{chunk}
+
+\index{Jolly, Raphael}
+\begin{chunk}{axiom.bib}
+@inproceedings{Joll13,
+ author = "Jolly, Raphael",
+ title = "Categories as Type Classes in the Scala Algebra System",
+ booktitle = "CASC 2013",
+ year = "2013",
+ pages = "209-218",
+ journal = "LNCS",
+ volume = "8136",
+ abstract =
+ "A characterization of the categorical view of computer algebra is
+ proposed. Some requirements on the ability for abstraction that
+ programming languages must have in order to allow a categorical
+ approach is given. Object-oriented inheritance is presented as a
+ suitable abstraction scheme and exemplified by the Java Algebra
+ System. Type classes are then introduced as an alternate abstraction
+ scheme and shown to be eventually better suited for modeling
+ categories. Pro and cons of the two approaches are discussed and a
+ hybrid solution is exhibited.",
+ paper = "Joll13.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Aladjev, Victor}
+\begin{chunk}{axiom.bib}
+@inproceedings{Alad03,
+ author = "Aladjev, Victor",
+ title = "Computer Algebra System Maple: A New Software Library",
+ booktitle = "ICCS 2003",
+ year = "2003",
+ pages = "711-717",
+ journal = "LNCS",
+ volume = "2657",
+ abstract =
+ "The paper represents Maple library containing more than 400
+ procedures expanding possibilities of the Maple package of releases
+ 6,7 and 8. The library is structurally organized similarly to the main
+ Maple library. The process of the library installing is simple enough
+ as a result of which the above library will be logically linked with
+ the main Maple library, supporting access to software located in it
+ equally with standard Maple software. The demo library is delivered
+ free of charge at request to addresses mentioned above.",
+ paper = "Alad03.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Jackson, Paul}
+\begin{chunk}{axiom.bib}
+@inproceedings{Jack94,
+ author = "Jackson, Paul",
+ title = "Exploring abstract algebra in constructive type theory",
+ booktitle = "CADE 1994",
+ year = "1994",
+ pages = "590-604",
+ journal = "LNCS",
+ volume = "814",
+ abstract =
+ "I describe my implementation of computational abstract algebra in the
+ Nuprl system. I focus on my development of multivariate polynomials. I
+ show how I use Nuprl's expressive type theory to define classes of
+ free abelian monoids and free monoid algebras. These classes are
+ combined to create a class of all implementations of polynomials. I
+ discuss the issues of subtyping and computational content that came up
+ in designing the class definitions. I give examples of relevant theory
+ developments, tactics and proofs. I consider how Nuprl could act as an
+ algebraic ‘oracle’ for a computer algebra system and the relevance of
+ this work for abstract functional programming.",
+ paper = "Jack94.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{van Hulzen, J. A.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Hulz82,
+ author = "van Hulzen, J. A.",
+ title = "Computer algebra systems viewed by a notorious user",
+ booktitle = "EUROCAM 1982",
+  pages = "166-180",
+ year = "1982",
+ journal = "LNCS",
+ volume = "144",
+ abstract =
+ "Are design and use of computer algebra systems disjoint or
+ complementary activities? Raising and answering this question are
+ equally controversial, since a clear distinction between languages
+ features and library facilities is hard to make. Instead of even
+ attempting to answer this rather academic question it is argued why it
+ is reasonable to raise related questions: Is SMP a paradox? Is it
+ realistic to neglect inaccurate input data? Is a very high level
+ programming language instrumental for equal opportunity employment in
+ scientific research?",
+ paper = "Hulz82.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Caviness, B.F.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Cavi85,
+ author = "Caviness, B.F.",
+ title = "Computer Algebra: Past and Future",
+ booktitle = "EUROCAL 1985",
+ year = "1985",
+ pages = "1-18",
+ journal = "LNCS",
+ volume = "203",
+ abstract =
+ "The preceding just touches on some of the highlights of the
+ accomplishments and unsolved problems in computer algebra. A really
+ comprehensive survey would be much too long for the space available
+ here. I close with the following quote, which has been attributed to
+ Albert Einstein and helps, perhaps, to keep a proper prospective on
+ our work: 'The symbolic representation of abstract entities is doomed
+ to its rightful place of relative insignificance in a world in which
+ flowers and beautiful women abound.'",
+ paper = "Cavi85.pdf"
+}
+
+\end{chunk}
+
+\index{Schreiner, Wolfgang}
+\index{Danielczyk-Landerl, Werner}
+\index{Marin, Mircea}
+\index{Stocher, Wolfgang}
+\begin{chunk}{axiom.bib}
+@inproceedings{Schr00,
+ author = "Schreiner, Wolfgang and Danielczyk-Landerl, Werner and
+ Marin, Mircea and Stocher, Wolfgang",
+ title = "A Generic Programming Environment for High-Performance
+ Mathematical Libraries",
+ booktitle = "Generic Programming",
+ year = "2000",
+ pages = "256-267",
+ journal = "LNCS",
+ volume = "1766",
+ abstract =
+ "We report on a programming environment for the development of generic
+ mathematical libraries based on functors (parameterized modules) that
+ have rigorously specified but very abstract interfaces. We focus on
+ the combination of the functor-based programming style with software
+ engineering principles in large development projects. The generated
+ target code is highly efficient and can be easily embedded into
+ foreign application environments.",
+ paper = "Schr00.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Magaud, Nicolas}
+\index{Narboux, Julien}
+\index{Schreck, Pascal}
+\begin{chunk}{axiom.bib}
+@inproceedings{Maga08,
+ author = "Magaud, Nicolas and Narboux, Julien and Schreck, Pascal",
+  title = "Formalizing Projective Plane Geometry in Coq",
+ booktitle = "Automated Deduction in Geometry",
+ year = "2008",
+ pages = "141-162",
+ journal = "LNCS",
+ volume = "6301",
+ abstract =
+ "We investigate how projective plane geometry can be formalized in a
+ proof assistant such as Coq. Such a formalization increases the
+ reliability of textbook proofs whose details and particular cases are
+ often overlooked and left to the reader as exercises. Projective plane
+ geometry is described through two different axiom systems which are
+ formally proved equivalent. Usual properties such as decidability of
+ equality of points (and lines) are then proved in a constructive
+ way. The duality principle as well as formal models of projective
+ plane geometry are then studied and implemented in Coq. Finally, we
+ formally prove in Coq that Desargues’ property is independent of the
+ axioms of projective plane geometry.",
+ paper = "Maga08.pdf"
+}
+
+\end{chunk}
+
+\index{Fevre, Stephane}
+\index{Wang, Dongming}
+\begin{chunk}{axiom.bib}
+@inproceedings{Fevr98,
+ author = "Fevre, Stephane and Wang, Dongming",
+ title = "Proving Geometric Theorems using Clifford Algebra and
+ Rewrite Rules",
+ booktitle = "Conf. on Automated Deduction",
+ year = "1998",
+ pages = "17-32",
+ journal = "LNCS",
+ volume = "1421",
+ abstract =
+ "We consider geometric theorems that can be stated constructively by
+ introducing points, while each newly introduced point may be
+ represented in terms of the previously constructed points using
+ Clifford algebraic operators. To prove a concrete theorem, one first
+ substitutes the expressions of the dependent points into the
+ conclusion Clifford polynomial to obtain an expression that involves
+ only the free points and parameters. A term-rewriting system is
+ developed that can simplify such an expression to 0, and thus prove
+ the theorem. A large class of theorems can be proved effectively in
+ this coordinate-free manner. This paper describes the method in detail
+ and reports on our preliminary experiments.",
+ paper = "Fevr98.pdf"
+}
+
+\end{chunk}
+
+\index{Kerber, Manfred}
+\index{Kohlhase, Michael}
+\index{Sorge, Volker}
+\begin{chunk}{axiom.bib}
+@article{Kerb98,
+ author = "Kerber, Manfred and Kohlhase, Michael and Sorge, Volker",
+ title = "Integrating Computer Algebra with Proof Planning",
+ journal = "Journal of Automated Reasoning",
+ volume = "21",
+ number = "3",
+ pages = "327-355",
+ year = "1998",
+ abstract =
+ "Mechanized reasoning systems and computer algebra systems have
+ different objectives. Their integration is highly desirable, since
+ formal proofs often involve both of the two different tasks proving
+ and calculating. Even more important, proof and computation are often
+ interwoven and not easily separable.
+
+ In this article we advocate an integration of computer algebra into
+ mechanized reasoning systems at the proof plan level. This approach
+ allows us to view the computer algebra algorithms as methods, that is,
+ declarative representations of the problem-solving knowledge specific
+ to a certain mathematical domain. Automation can be achieved in many
+ cases by searching for a hierarchic proof plan at the method level by
+ using suitable domain-specific control knowledge about the
+ mathematical algorithms. In other words, the uniform framework of
+ proof planning allows us to solve a large class of problems that are
+ not automatically solvable by separate systems.
+
+ Our approach also gives an answer to the correctness problems inherent
+ in such an integration. We advocate an approach where the computer
+ algebra system produces high-level protocol information that can be
+ processed by an interface to derive proof plans. Such a proof plan in
+ turn can be expanded to proofs at different levels of abstraction, so
+ the approach is well suited for producing a high-level verbalized
+ explication as well as for a low-level, machine-checkable,
+ calculus-level proof.
+
+ We present an implementation of our ideas and exemplify them using an
+ automatically solved example.",
+ paper = "Kerb98.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Loos, Ruediger G. K.}
+\begin{chunk}{axiom.bib}
+@article{Loos74,
+ author = "Loos, Ruediger G. K.",
+ title = "Toward a Formal Implementation of Computer Algebra",
+ journal = "SIGSAM",
+ volume = "8",
+ number = "3",
+ pages = "9-16",
+ year = "1974",
+ abstract =
+ "We consider in this paper the task of synthesizing an algebraic
+ system. Today the task is significantly simpler than in the pioneer
+ days of symbol manipulation, mainly because of the work done by the
+ pioneers in our area, but also because of the progress in other areas
+ of Computer Science. There is now a considerable collection of
+ algebraic algorithms at hand and a much better understanding of data
+ structures and programming constructs than only a few years ago.",
+ paper = "Loos74.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Gries, David}
+\begin{chunk}{axiom.bib}
+@book{Grie78,
+ author = "Gries, David",
+  title = "Programming Methodology",
+ publisher = "Springer-Verlag",
+ year = "1978"
+}
+
+\end{chunk}
+
+\index{Wirsing, Martin}
+\begin{chunk}{axiom.bib}
+@InCollection{Wirs91,
+ author = "Wirsing, Martin",
+ title = "Algebraic Specification",
+ booktitle = "Handbook of Theoretical Computer Science (Vol B)",
+ publisher = "MIT Press",
+ year = "1991",
+ pages = "675-788",
+ chapter = "13",
+ isbn = "0-444-88074-7"
+}
+
+\end{chunk}
+
+\index{Jones, Simon Peyton}
+\begin{chunk}{axiom.bib}
+@techreport{Jone98,
+ author = "Jones, Simon Peyton",
+  title = "The Haskell 98 Language and Libraries. The Revised Report",
+ institution = "Cambridge University Press",
+ year = "1998",
+ type = "technical report",
+ link = "\url{https://www.haskell.org/definition/haskell98-report.pdf}"
+}
+
+\end{chunk}
+
+\index{Aho, Alfred V.}
+\index{Sethi, Ravi}
+\index{Ullman, Jeffrey D.}
+\begin{chunk}{axiom.bib}
+@book{Ahox86,
+ author = "Aho, Alfred V. and Sethi, Ravi and Ullman, Jeffrey D.",
+ title = "Compilers: Principles, Techniques, and Tools",
+ year = "1986",
+ publisher = "Addison-Wesley",
+ isbn = "978-0201100884"
+}
+
+\end{chunk}
+
+\index{Mitchell, John C.}
+\begin{chunk}{axiom.bib}
+@InCollection{Mitc91,
+ author = "Mitchell, John C.",
+ title = "Type Systems for Programming Languages",
+  booktitle = "Handbook of Theoretical Computer Science (Vol B.)",
+ pages = "365-458",
+ year = "1991",
+ publisher = "MIT Press",
+ isbn = "0-444-88074-7"
+}
+
+\end{chunk}
+
+\index{Strachey, Christopher}
+\begin{chunk}{axiom.bib}
+@article{Stra00,
+ author = "Strachey, Christopher",
+ title = "Fundamental Concepts in Programming Languages",
+ journal = "Higher-Order and Symbolic Computation",
+ volume = "13",
+ number = "1-2",
+ pages = "11-49",
+  year = "2000",
+ abstract =
+ "This paper forms the substance of a course of lectures given at the
+ International Summer School in Computer Programming at Copenhagen in
+ August, 1967. The lectures were originally given from notes and the
+ paper was written after the course was finished. In spite of this, and
+ only partly because of the shortage of time, the paper still retains
+ many of the shortcomings of a lecture course. The chief of these are
+ an uncertainty of aim—it is never quite clear what sort of audience
+ there will be for such lectures—and an associated switching from
+ formal to informal modes of presentation which may well be less
+ acceptable in print than it is natural in the lecture room. For these
+ (and other) faults, I apologise to the reader.
+
+ There are numerous references throughout the course to CPL [1–3]. This
+ is a programming language which has been under development since 1962
+ at Cambridge and London and Oxford. It has served as a vehicle for
+ research into both programming languages and the design of
+ compilers. Partial implementations exist at Cambridge and London. The
+ language is still evolving so that there is no definitive manual
+ available yet. We hope to reach another resting point in its evolution
+ quite soon and to produce a compiler and reference manuals for this
+ version. The compiler will probably be written in such a way that it
+ is relatively easyto transfer it to another machine, and in the first
+ instance we hope to establish it on three or four machines more or
+ less at the same time.
+
+ The lack of a precise formulation for CPL should not cause much
+ difficulty in this course, as we are primarily concerned with the
+ ideas and concepts involved rather than with their precise
+ representation in a programming language.",
+ paper = "Stra00.pdf"
+}
+
+\end{chunk}
+
+\index{Goguen, Joseph}
+\index{Meseguer, Jose}
+\begin{chunk}{axiom.bib}
+@techreport{Gogu89,
+ author = "Goguen, Joseph and Meseguer, Jose",
+ title = "Order-sorted Algebra I : Equational Deduction for Multiple
+ Inheritance, Overloading, Exceptions, and Partial Operations",
+ type = "technical report",
+ institution = "SRI International",
+  year = "1989",
+ number = "SRIR 89-10"
+}
+
+\end{chunk}
+
+\index{Hindley, R.}
+\begin{chunk}{axiom.bib}
+@article{Hind69,
+ author = "Hindley, R.",
+ title = "The Principal Type-Scheme of an Object in Combinatory Logic",
+ journal = "Trans. AMS",
+ volume = "146",
+ year = "1969",
+ pages = "29-60",
+ paper = "Hind69.pdf"
+}
+
+\end{chunk}
+
+\index{Milner, Robin}
+\begin{chunk}{axiom.bib}
+@article{Miln78,
+ author = "Milner, Robin",
+ title = "A Theory of Type Polymorphism in Programming",
+ journal = "J. Computer and System Sciences",
+ volume = "17",
+ number = "3",
+ year = "1978",
+ pages = "348-375",
+ abstract =
+ "The aim of this work is largely a practical one. A widely employed
+ style of programming, particularly in structure-processing languages
+ which impose no discipline of types, entails defining procedures which
+ work well on objects of a wide variety. We present a formal type
+ discipline for such polymorphic procedures in the context of a simple
+ programming language, and a compile time type-checking algorithm which
+ enforces the discipline. A Semantic Soundness Theorem (based on a
+ formal semantics for the language) states that well-type programs
+  cannot “go wrong” and a Syntactic Soundness Theorem states that if
+ accepts a program then it is well typed. We also discuss extending
+ these results to richer languages; a type-checking algorithm based on
+ is in fact already implemented and working, for the metalanguage ML in
+ the Edinburgh LCF system.",
+ paper = "Miln78.pdf"
+}
+
+\end{chunk}
+
+\index{Damas, Luis}
+\index{Milner, Robin}
+\begin{chunk}{axiom.bib}
+@inproceedings{Dama82,
+ author = "Damas, Luis and Milner, Robin",
+ title = "Principal Type-schemes for Functional Programs",
+ booktitle = "POPL 82",
+ pages = "207-212",
+ year = "1982",
+ isbn = "0-89798-065-6",
+ paper = "Dama82.pdf"
+}
+
+\end{chunk}
+
+\index{Milner, R.}
+\index{Torte, M.}
+\index{Harper, R.}
+\begin{chunk}{axiom.bib}
+@book{Miln90,
+ author = "Milner, Robin and Torte, Mads and Harper, Robert",
+ title = "The Definition of Standard ML",
+ publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
+ link = "\url{http://sml-family.org/sml90-defn.pdf}",
+ year = "1990",
+ paper = "Miln90.pdf"
+}
+
+\end{chunk}
+
+\index{Milner, R.}
+\index{Torte, M.}
+\begin{chunk}{axiom.bib}
+@book{Miln91,
+ author = "Milner, Robin and Torte, Mads",
+ title = "Commentary on Standard ML",
+ publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
+ link = "\url{https://pdfs.semanticscholar.org/d199/16cbbda01c06b6eafa0756416e8b6f15ff44.pdf}",
+ year = "1991",
+ paper = "Miln91.pdf"
+}
+
+\end{chunk}
+
+\index{Turner, D.A.}
+\begin{chunk}{axiom.bib}
+@article{Turn85,
+ author = "Turner, D. A.",
+ title = "Miranda: A non-strict functional language with polymorphic types",
+ journal = "Lecture Notes in Computer Science",
+ volume = "201",
+ pages = "1-16",
+ year = "1985",
+ link = "\url{http://miranda.org.uk/nancy.html}",
+ paper = "Turn85.pdf"
+}
+
+\end{chunk}
+
+\index{Turner, D.A.}
+\begin{chunk}{axiom.bib}
+@article{Turn86,
+ author = "Turner, D. A.",
+ title = "An Overview of Miranda",
+ journal = "SIGPLAN Notices",
+ volume = "21",
+ number = "12",
+ pages = "158-166",
+ year = "1986",
+ link = "\url{http://miranda.org.uk/}",
+ paper = "Turn86.pdf"
+}
+
+\end{chunk}
+
+\index{Foderaro, John K.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Fode83,
+ author = "Foderaro, John K.",
+ title = "The Design of a Language for Algebraic Computation Systems",
+ school = "U.C. Berkeley, EECS Dept.",
+ year = "1983",
+ link = "\url{http://digitalassets.lib.berkeley.edu/techreports/ucb/text/CSD-83-160.pdf}",
+ abstract =
+ "This thesis describes the design of a language to support a
+ mathematics-oriented symbolic algebra system. The language, which we
+ have named NEWSPEAK, permits the complex interrelations of
+ mathematical types, such as rings, fields and polynomials to be
+ described. Functions can be written over the most general type that
+ has the required operations and properties and the inherited by
+ subtypes. All function calls are generic, with most function
+ resolution done at compile time. Newspeak is type-safe, yet permits
+  runtime creation of types.",
+ paper = "Fode83.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Fuh, You-Chin}
+\index{Mishra, Prateek}
+\begin{chunk}{axiom.bib}
+@article{Fuhx90,
+  author = "Fuh, You-Chin and Mishra, Prateek",
+ title = "Type Inference with Subtypes",
+ journal = "Theoretical Computer Science",
+ volume = "73",
+ number = "2",
+ year = "1990",
+ pages = "155-175",
+ abstract =
+ "We extend polymorphic type inference with a very general notion of
+ subtype based on the concept of type transformation. This paper
+ describes the following results. We prove the existence of (i)
+ principal type property and (ii) syntactic completeness of the
+ type-checker, for type inference with subtypes. This result is
+ developed with only minimal assumptions on the underlying theory of
+ subtypes. As a consequence, it can be used as the basis for type
+ inference with a broad class of subtype theories. For a particular
+ “structural” theory of subtypes, those engendered by inclusions
+ between type constants only, we show that principal types are
+ compactly expressible. This suggests that type inference for the
+ structured theory of subtypes is feasible. We describe algorithms
+ necessary for such a system. The main algorithm we develop is called
+ MATCH, an extension to the classical unification algorithm. A proof of
+ correctness for MATCH is given.",
+ paper = "Fuhx90.pdf"
+}
+
+\end{chunk}
+
+\index{Nipkow, Tobias}
+\index{Snelting, Gregor}
+\begin{chunk}{axiom.bib}
+@inproceedings{Nipk91,
+ author = "Nipkow, Tobias and Snelting, Gregor",
+ title = "Type Classes and Overloading Resolution via Order-Sorted
+ Unification",
+ booktitle = "Proc 5th ACM Conf. Functional Prog. Lang. and Comp. Arch.",
+ year = "1991",
+ publisher = "Springer",
+ journal = "LNCS",
+ volume = "523",
+ pages = "1-14",
+ abstract =
+ "We present a type inference algorithm for a Haskell-like language
+ based on order-sorted unification. The language features polymorphism,
+ overloading, type classes and multiple inheritance. Class and instance
+ declarations give rise to an order-sorted algebra of types. Type
+ inference essentially reduces to the Hindley/Milner algorithm where
+ unification takes place in this order-sorted algebra of types. The
+ theory of order-sorted unification provides simple sufficient
+ conditions which ensure the existence of principal types. The
+ semantics of the language is given by a translation into ordinary
+ lambda-calculus. We prove the correctness of our type inference
+ algorithm with respect to this semantics."
+}
+
+\end{chunk}
+
+\index{Schmidt-Schauss, M.}
+\begin{chunk}{axiom.bib}
+@book{Schm89,
+ author = "Schmidt-Schauss, M.",
+ title = "Computational Aspects of an Order-Sorted Logic with Term
+ Declarations",
+ publisher = "Springer",
+ isbn = "978-3-540-51705-4",
+ year = "1989"
+}
+
+\end{chunk}
+
+\index{Smolka, G.}
+\index{Nutt, W.}
+\index{Goguen, J.}
+\index{Meseguer, J.}
+\begin{chunk}{axiom.bib}
+@InCollection{Smol89,
+ author = "Smolka, G. and Nutt, W. and Goguen, J. and Meseguer, J.",
+ title = "Order-sorted Equational Computation",
+ booktitle = "Resolution of Equations in Algebra Structures (Vol 2)",
+ pages = "297-367",
+ year = "1989"
+}
+
+\end{chunk}
+
+\index{Waldmann, Uwe}
+\begin{chunk}{axiom.bib}
+@article{Wald92,
+ author = "Waldmann, Uwe",
+ title = "Semantics of Order-sorted Specifications",
+ journal = "Theoretical Computer Science",
+ volume = "94",
+ number = "1-2",
+ year = "1992",
+ pages = "1-35",
+ abstract =
+ "Order-sorted specifications (i.e. many-sorted specifications with
+ subsort relations) have been proved to be a useful tool for the
+ description of partially defined functions and error handling in
+ abstract data types.
+
+ Several definitions for order-sorted algebras have been proposed. In
+ some papers an operator symbol, which may be multiply declared, is
+ interpreted by a family of functions (“overloaded” algebras). In other
+ papers it is always interpreted by a single function (“nonoverloaded”
+ algebras). On the one hand, we try to demonstrate the differences
+ between these two approaches with respect to equality, rewriting and
+ completion; on the other hand, we prove that in fact both theories can
+ be studied in parallel provided that certain notions are suitably
+ defined.
+
+ The overloaded approach differs from the many-sorted and the
+ nonoverloaded one in that the overloaded term algebra is not
+ necessarily initial. We give a decidable sufficient criterion for the
+ initiality of the term algebra, which is less restrictive than
+ GJM-regularity as proposed by Goguen, Jouannaud and Meseguer.
+
+ Sort-decreasingness is an important property of rewrite systems since
+ it ensures that confluence and Church-Rosser property are equivalent,
+ that the overloaded and nonoverloaded rewrite relations agree, and
+ that variable overlaps do not yield critical pairs. We prove that it
+ is decidable whether or not a rewrite rule is sort-decreasing, even if
+ the signature is not regular.
+
+ Finally, we demonstrate that every overloaded completion procedure may
+ also be used in the nonoverloaded world, but not conversely, and that
+ specifications exist that can only be completed using the
+ nonoverloaded semantics.",
+ paper = "Wald92.pdf"
+}
+
+\end{chunk}
+
+\index{Comon, Hubert}
+\begin{chunk}{axiom.bib}
+@inproceedings{Como90,
+ author = "Comon, Hubert",
+ title = "Equational Formulas in Order-sorted Algebras",
+ booktitle = "IICALP 90. Automata, Languages and Programming",
+ year = "1990",
+ pages = "674-688",
+ abstract =
+ "We propose a set of transformation rules for first order formulas
+ whose atoms are either equations between terms or “sort constraints” t
+ ε s where s is a regular tree language (or a sort in the algebraic
+ specification community). This set of rules is proved to be correct,
+ terminating and complete. This shows in particular that the first
+ order theory of any rational tree language is decidable, extending the
+ results of [Mal71,CL89,Mah88]. We also show how to apply our results
+ to automatic inductive proofs in equational theories."
+}
+
+\end{chunk}
+
+\index{MacLane, Saunders}
+\begin{chunk}{axiom.bib}
+@book{Macl91,
+ author = "MacLane, Saunders",
+ title = "Categories for the Working Mathematician",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "0-387-98403-8",
+ link = "\url{http://www.maths.ed.ac.uk/~aar/papers/maclanecat.pdf}",
+ paper = "Macl91.pdf"
+}
+
+\end{chunk}
+
+\index{Schubert, Horst}
+\begin{chunk}{axiom.bib}
+@book{Schu72,
+ author = "Schubert, Horst",
+ title = "Categories",
+ publisher = "Springer-Verlag",
+ year = "1972"
+}
+
+\end{chunk}
+
+\index{Freyd, Peter J.}
+\index{Scedrov, Andre}
+\begin{chunk}{axiom.bib}
+@book{Frey90,
+ author = "Freyd, Peter J. and Scedrov, Andre",
+ title = "Categories, Allegories",
+ publisher = "Elsevier Science",
+ year = "1990",
+ isbn = "0-444-70368-3"
+}
+
+\end{chunk}
+
+\index{Rydeheard, D. E.}
+\index{Burstall, R. M.}
+\begin{chunk}{axiom.bib}
+@book{Ryde88,
+ author = "Rydeheard, D. E. and Burstall, R. M.",
+ title = "Computational Category Theory",
+ publisher = "Prentice Hall",
+ year = "1988",
+ isbn = "978-0131627369"
+}
+
+\end{chunk}
+
+\index{Ehrig, Hartmut}
+\index{Mahr, Bernd}
+\begin{chunk}{axiom.bib}
+@book{Ehri85,
+ author = "Ehrig, Hartmut and Mahr, Bernd",
+ title = "Fundamentals of Algebraic Specification 1: Equations and
+ Initial Semantics",
+ publisher = "Springer Verlag",
+ year = "1985",
+ isbn = "978-0387137186"
+}
+
+\end{chunk}
+
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@article{Loos72,
+ author = "Loos, Rudiger",
+ title = "Algebraic Algorithm Descriptions as Programs",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "23",
+ year = "1972",
+ pages = "16-24",
+ abstract =
+ "We propose methods for writing algebraic programs in an algebraic
+ notation. We discuss the advantages of this approach and a specific
+ example",
+ paper = "Loos72.pdf"
+}
+
+\end{chunk}
+
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@article{Loos76,
+ author = "Loos, Rudiger",
+ title = "The Algorithm Description Language (ALDES) (report)",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "10",
+ number = "1",
+ year = "1976",
+ pages = "14-38",
+ abstract =
+ "ALDES is a formalization of the method to describe algorithms used in
+ Knuth's books. The largest documentation of algebraic algorithms,
+ Collins' SAC system for Computer Algebra, is written in this
+ language. In contrast to PASCAL it provides automatic storage
+ deallocation. Compared to LISP equal emphasis was placed on efficiency
+ of arithmetic, list processing, and array handling. To allow the
+ programmer full control of efficiency all mechanisms of the system are
+ accessible to him. Currently ALDES is available as a preprocessor to
+ ANSI Fortran, using no additional primitives.",
+ paper = "Loos76.pdf"
+}
+
+\end{chunk}
+
+\index{Loos, Rudiger}
+\index{Collins, George E.}
+\begin{chunk}{axiom.bib}
+@book{Loos92,
+ author = "Loos, Rudiger and Collins, George E.",
+ title = "Revised Report on the ALgorithm Language ALDES",
+ publisher = "Institut fur Informatik",
+ year = "1992"
+}
+
+\end{chunk}
+
+\index{Collins, George E.}
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@techreport{Coll90,
+ author = "Collins, George E. and Loos, Rudiger",
+ title = "Specification and Index of SAC-2 Algorithms",
+ institution = "Univ. of Tubingen",
+ type = "technical report",
+ year = "1990",
+ number = "WSI-90-4"
+}
+
+\end{chunk}
+
+\index{Buendgen, R.}
+\index{Hagel, G.}
+\index{Loos, R.}
+\index{Seitz, S.}
+\index{Simon, G.}
+\index{Stuebner, R.}
+\index{Weber, A.}
+\begin{chunk}{axiom.bib}
+@article{Buen91,
+ author = "Buendgen, R. and Hagel, G. and Loos, R. and Seitz, S. and
+ Simon, G. and Stuebner, R. and Weber, A.",
+  title = "SAC-2 in ALDES -- Ein Werkzeug fur die Algorithmenforschung",
+ journal = "MathPAD 1",
+ volume = "3",
+ year = "1991",
+ pages = "33-37"
+}
+
+\end{chunk}
+
+\index{Weber, Andreas}
+\begin{chunk}{axiom.bib}
+@techreport{Webe92,
+ author = "Weber, Andreas",
+ title = "Structuring the Type System of a Computer Algebra System",
+ link = "\url{http://cg.cs.uni-bonn.de/personal-pages/weber/publications/pdf/WeberA/Weber92a.pdf}",
+ institution = "Wilhelm-Schickard-Institut fur Informatik",
+ year = "1992",
+ abstract = "
+ Most existing computer algebra systems are pure symbol manipulating
+ systems without language support for the occuring types. This is
+  mainly due to the fact that the occurring types are much more
+ complicated than in traditional programming languages. In the last
+ decade the study of type systems has become an active area of
+ research. We will give a proposal for a type system showing that
+ several problems for a type system of a symbolic computation system
+ can be solved by using results of this research. We will also provide
+ a variety of examples which will show some of the problems that remain
+ and that will require further research.",
+ paper = "Webe92b.pdf",
+  keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Bronstein, Manuel}
+\begin{chunk}{axiom.bib}
+@misc{Bron90,
+ author = "Bronstein, Manuel",
+ title =
+ "$\sum^{IT}$ -- A strongly-typed embeddable computer algebra library",
+ link = "\url{http://www-sop.inria.fr/cafe/Manuel.Bronstein/publications/mb_papers.html}",
+ abstract = "
+ We describe the new computer algebra library $\sum^{IT}$ and its
+ underlying design. The development of $\sum^{IT}$ is motivated by the
+ need to provide highly efficient implementations of key algorithms for
+ linear ordinary differential and ($q$)-difference equations to
+ scientific programmers and to computer algebra users, regardless of
+ the programming language or interactive system they use. As such,
+ $\sum^{IT}$ is not a computer algebra system per se, but a library (or
+ substrate) which is designed to be ``plugged'' with minimal efforts
+ into different types of client applications.",
+ paper = "Bron96.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Reyo74,
+ author = "Reynolds, John C.",
+ title = "Towards a Theory of Type Structure",
+ booktitle = "Colloquim on Programming",
+ year = "1974",
+ pages = "9-11",
+ paper = "Reyo74.pdf"
+}
+
+\end{chunk}
+
+\index{Chen, Kung}
+\index{Hudak, Paul}
+\index{Odersky, Martin}
+\begin{chunk}{axiom.bib}
+@inproceedings{Chen92,
+ author = "Chen, Kung and Hudak, Paul and Odersky, Martin",
+ title = "Parametric Type Classes",
+ booktitle = "Proc. ACM Conf. on LISP and Functional Programming",
+ year = "1992",
+  pages = "170-181",
+ abstract =
+ "We propose a generalization to Haskell's type classes where a class
+ can have type parameters besides the placeholder variable. We show
+ that this generalization is essential to represent container classes
+ with overloaded data constructor and selector operations. We also show
+ that the resulting type system has principal types and present
+ unification and type reconstruction algorithms.",
+ paper = "Chen92.pdf"
+}
+
+\end{chunk}
+
+\index{Schoenfinkel, M.}
+\begin{chunk}{axiom.bib}
+@misc{Scho24,
+ author = "Schoenfinkel, M.",
+ title = "Uber die Bausteine der mathematischen Logik",
+ year = "1924",
+ pages = "305-316"
+}
+
+\end{chunk}
+
+\index{Jones, Simon Peyton}
+\begin{chunk}{axiom.bib}
+@book{Jone87,
+ author = "Jones, Simon Peyton",
+ title = "The Implementation of Functional Programming Languages",
+ publisher = "Simon and Schuster",
+ year = "1987",
+ isbn = "0-13-453333-X",
+ paper = "Jone87.pdf"
+}
+
+\end{chunk}
+
+\index{Leiss, Hans}
+\begin{chunk}{axiom.bib}
+@inproceedings{Leis87,
+  author = "Leiss, Hans",
+  title = "On Type Inference for Object-Oriented Programming Languages",
+  booktitle = "Int. Workshop on Computer Science Logic",
+  year = "1987",
+  pages = "151-172",
+  abstract =
+  "We present a type inference calculus for object-oriented programming
+  languages. Explicit polymorphic types, subtypes and multiple
+  inheritance are allowed. Class types are obtained by selection from
+  record types, but not considered subtypes of record types. The subtype
+  relation for class types reflects the (mathematically clean)
+  properties of subclass relations in object-oriented programming to a
+  better extent than previous systems did.
+
+  Based on Mitchell's models for type inference, a semantics for types is
+  given where types are sets of values in a model of type-free lambda
+  calculus. For the sublanguage without type quantifiers and subtype
+  relation, automatic type inference is possible by extending Milner's
+  algorithm W to deal with a polymorphic fixed-point rule."
+}
+
+\end{chunk}
+
+\index{Kfoury, A.J.}
+\index{Tiuryn, J.}
+\index{Urzyczyn, P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Kfou88,
+  author = "Kfoury, A.J. and Tiuryn, J. and Urzyczyn, P.",
+  title = "A Proper Extension of ML with an Effective Type-Assignment",
+  booktitle = "POPL 88",
+  year = "1988",
+  pages = "58-69",
+  abstract =
+  "We extend the functional language ML by allowing the recursive calls
+  to a function F on the right-hand side of its definition to be at
+  different types, all generic instances of the (derived) type of F on
+  the left-hand side of its definition. The original definition of ML
+  does not allow this feature. This extension does not produce new types
+  beyond the usual universal polymorphic types of ML and satisfies the
+  properties already enjoyed by ML: the principal-type property and the
+  effective type-assignment property.",
+  paper = "Kfou88.pdf"
+}
+
+\end{chunk}
+
+\index{Tiuryn, J.}
+\begin{chunk}{axiom.bib}
+@article{Tiur90,
+  author = "Tiuryn, J.",
+  title = "Type Inference Problems -- A Survey",
+  journal = "Lecture Notes in Computer Science",
+  volume = "452",
+  pages = "105-120",
+  year = "1990",
+  paper = "Tiur90.pdf"
+}
+
+\end{chunk}
+
+\index{Kfoury, A. J.}
+\index{Tiuryn, J.}
+\index{Urzyczyn, P.}
+\begin{chunk}{axiom.bib}
+@article{Kfou93,
+  author = "Kfoury, A. J. and Tiuryn, J. and Urzyczyn, P.",
+  title = "The Undecidability of the Semi-unification Problem",
+  journal = "Information and Computation",
+  volume = "102",
+  number = "1",
+  year = "1993",
+  pages = "83-101",
+  abstract =
+  "The Semi-Unification Problem (SUP) is a natural generalization of
+  both first-order unification and matching. The problem arises in
+  various branches of computer science and logic. Although several
+  special cases of SUP are known to be decidable, the problem in general
+  has been open for several years. We show that SUP in general is
+  undecidable, by reducing what we call the ``boundedness problem'' of
+  Turing machines to SUP. The undecidability of this boundedness problem
+  is established by a technique developed in the mid-1960s to prove
+  related results about Turing machines.",
+  paper = "Kfou93.pdf"
+}
+
+\end{chunk}
+
+\begin{chunk}{axiom.bib}
+@misc{Comp17,
+  author = {CompCert},
+  title  = {The CompCert Formally Certified C Compiler},
+  year   = {2017},
+  link   = {\url{http://compcert.inria.fr}}
+}
+
+\end{chunk}
+
+\index{Hudak, Paul}
+\index{Peterson, John}
+\index{Fasel, Joseph H.}
+\begin{chunk}{axiom.bib}
+@misc{Huda99,
+  author = {Hudak, Paul and Peterson, John and Fasel, Joseph H.},
+  title  = {A Gentle Introduction to Haskell 98},
+  year   = {1999},
+  link   = {\url{https://www.haskell.org/tutorial/haskell-98-tutorial.pdf}},
+  paper  = {Huda99.pdf}
+}
+
+\end{chunk}
+
+\index{Faxen, Karl-Filip}
+\begin{chunk}{axiom.bib}
+@article{Faxe02,
+  author = "Faxen, Karl-Filip",
+  title = "A Static Semantics for Haskell",
+  year = "2002",
+  journal = "J. Functional Programming",
+  volume = "12",
+  number = "4-5",
+  pages = "295-357",
+  abstract =
+  "This paper gives a static semantics for Haskell 98, a non-strict
+  purely functional programming language. The semantics formally
+  specifies nearly all the details of the Haskell 98 type system,
+  including the resolution of overloading, kind inference (including
+  defaulting) and polymorphic recursion, the only major omission being a
+  proper treatment of ambiguous overloading and its resolution.
+  Overloading is translated into explicit dictionary passing, as in all
+  current implementations of Haskell. The target language of this
+  translation is a variant of the Girard--Reynolds polymorphic lambda
+  calculus featuring higher order polymorphism and explicit type
+  abstraction and application in the term language. Translated programs
+  can thus still be type checked, although the implicit version of this
+  system is impredicative. A surprising result of this formalization
+  effort is that the monomorphism restriction, when rendered in a system
+  of inference rules, compromises the principal type property.",
+  paper = "Faxe02.pdf"
+}
+
+\end{chunk}
+
+\index{MacLane, Saunders}
+\begin{chunk}{axiom.bib}
+@book{Macl92,
+  author    = {MacLane, Saunders},
+  title     = {Sheaves in Geometry and Logic: A First Introduction to Topos
+  Theory},
+  year      = {1992},
+  isbn      = {978-0-387-97710-2},
+  publisher = {Springer}
+}
+
+\end{chunk}
+
+\index{Manes, Ernest G.}
+\begin{chunk}{axiom.bib}
+@book{Mane76,
+  author = "Manes, Ernest G.",
+  title = "Algebraic Theories",
+  publisher = "Springer",
+  year = "1976",
+  series = "Graduate Texts in Mathematics",
+  volume = "26",
+  isbn = "978-1-4612-9860-1"
+}
+
+\end{chunk}
+
+\index{Paterson, M. S.}
+\begin{chunk}{axiom.bib}
+@article{Pate78,
+  author  = {Paterson, M. S.},
+  title   = {Linear Unification},
+  journal = {J. Computer and System Sciences},
+  volume  = {16},
+  number  = {2},
+  year    = {1978},
+  pages   = {158-167},
+  abstract =
+  {A unification algorithm is described which tests a set of expressions
+  for unifiability and which requires time and space which are only linear
+  in the size of the input},
+  paper   = {Pate78.pdf}
+}
+
+\end{chunk}
+
+\index{Kanellakis, Paris C.}
+\index{Mairson, Harry G.}
+\index{Mitchell, John C.}
+\begin{chunk}{axiom.bib}
+@techreport{Kane90,
+  author = "Kanellakis, Paris C. and Mairson, Harry G. and Mitchell, John C.",
+  title = "Unification and ML Type Reconstruction",
+  link = "\url{ftp://ftp.cs.brown.edu/pub/techreports/90/cs90-26.pdf}",
+  institution = "Brown University",
+  year = "1990",
+  number = "CS-90-26",
+  abstract =
+  "We study the complexity of type reconstruction for a core fragment of
+  ML with lambda abstraction, function application, and the polymorphic
+  {\bf let} declaration. We derive exponential upper and lower bounds on
+  recognizing the typable core ML expressions. Our primary technical
+  tool is unification of succinctly represented type expressions. After
+  observing that core ML expressions, of size $n$, can be typed in
+  DTIME($s^n$), we exhibit two different families of programs whose
+  principal types grow exponentially. We show how to exploit the
+  expressiveness of the {\bf let}-polymorphism in these constructions to
+  derive lower bounds on deciding typability: one leads naturally to
+  NP-hardness and the other to DTIME($2^{n^k}$)-hardness for each integer
+  $k\ge 1$. Our generic simulation of any exponential time Turing
+  Machine by ML type reconstruction may be viewed as a nonstandard way
+  of computing with types. Our worst-case lower bounds stand in contrast
+  to practical experience, which suggests that commonly used algorithms
+  for type reconstruction do not slow compilation substantially.",
+  paper = "Kane90.pdf"
+}
+
+\end{chunk}
+
+\index{Volpano, Dennis M.}
+\index{Smith, Geoffrey S.}
+\begin{chunk}{axiom.bib}
+@techreport{Volp91,
+  author = "Volpano, Dennis M. and Smith, Geoffrey S.",
+  title = "On the Complexity of ML Typability with Overloading",
+  institution = "Cornell University",
+  year = "1991",
+  number = "TR91-1210",
+  abstract =
+  "We examine the complexity of type checking in an ML-style type system
+  that permits functions to be overloaded with different types. In
+  particular, we consider the extension of the ML Type system proposed
+  by Wadler and Blott in the appendix of [WB89], with global overloading
+  only, that is, where the only overloading is that which exists in an
+  initial type assumption set; no local overloading via over and inst
+  expressions is allowed. It is shown that under a correct notion of
+  well-typed terms, the problem of determining whether a term is well
+  typed with respect to an assumption set in this system is
+  undecidable. We then investigate limiting recursion in assumption
+  sets, the source of the undecidability. Barring mutual recursion is
+  considered, but this proves too weak, for the problem remains
+  undecidable. Then we consider a limited form of recursion called
+  parametric recursion. We show that although the problem becomes
+  decidable under parametric recursion, it appears harder than
+  conventional ML typability, which is complete for DEXPTIME [Mai90].",
+  paper = "Volp91.pdf"
+}
+
+\end{chunk}
+
+\index{Hodges, Wilfrid}
+\begin{chunk}{axiom.bib}
+@article{Hodg95,
+  author = "Hodges, Wilfrid",
+  title = "The Meaning of Specifications I: Domains and Initial Models",
+  journal = "Theoretical Computer Science",
+  volume = "192",
+  number = "1",
+  year = "1995",
+  pages = "67-89",
+  abstract =
+  "This is the first of a short series of papers intended to provide one
+  common semantics for several different types of specification
+  language, in order to allow comparison and translations. The
+  underlying idea is that a specification describes the behaviour of a
+  system, depending on parameters. We can represent this behaviour as a
+  functor which acts on structures representing the parameters, and
+  which yields a structure representing the behaviour. We characterise
+  in domain-theoretic terms the class of functors which could in
+  principle be specified and implemented; briefly, they are the functors
+  which preserve directed colimits and whose restriction to finitely
+  presented structures is recursively enumerable. We also characterise
+  those functors which allow specification by initial semantics in
+  universal Horn classes with finite vocabulary; these functors consist
+  of a free functor (i.e. left adjoint of a forgetful functor) followed
+  by a forgetful functor. The main result is that these two classes of
+  functor are the same up to natural isomorphism.",
+  paper = "Hodg95.pdf"
+}
+
+\end{chunk}
+
+\index{Graetzer, George}
+\begin{chunk}{axiom.bib}
+@book{Grae79,
+  author = "Graetzer, George",
+  title = "Universal Algebra",
+  publisher = "Springer",
+  isbn = "978-0-387-77486-2",
+  year = "1979",
+  paper = "Grae79.pdf"
+}
+
+\end{chunk}
+
+\index{Dershowitz, Nachum}
+\index{Jouannaud, Jean-Pierre}
+\begin{chunk}{axiom.bib}
+@techreport{Ders89,
+  author      = {Dershowitz, Nachum and Jouannaud, Jean-Pierre},
+  title       = {Rewrite Systems},
+  year        = {1989},
+  number      = {478},
+  institution = {Laboratoire de Recherche en Informatique},
+  paper       = {Ders89.pdf}
+}
+
+\end{chunk}
+
+\index{Chang, C.C.}
+\index{Keisler, H. Jerome}
+\begin{chunk}{axiom.bib}
+@book{Chan90,
+  author = "Chang, C.C. and Keisler, H. Jerome",
+  title = "Model Theory",
+  publisher = "North Holland",
+  year = "1990",
+  series = "Studies in Logic and the Foundations of Mathematics",
+  volume = "73",
+  abstract =
+  "Since the second edition of this book (1977), Model Theory has
+  changed radically, and is now concerned with fields such as
+  classification (or stability) theory, nonstandard analysis,
+  model-theoretic algebra, recursive model theory, abstract model
+  theory, and model theories for a host of nonfirst order logics. Model
+  theoretic methods have also had a major impact on set theory,
+  recursion theory, and proof theory.
+
+  This new edition has been updated to take account of these changes,
+  while preserving its usefulness as a first textbook in model
+  theory. Whole new sections have been added, as well as new exercises
+  and references. A number of updates, improvements and corrections have
+  been made to the main text"
+}
+
+\end{chunk}
+
+\index{Goguen, Joseph A.}
+\index{Winkler, Timothy}
+\index{Meseguer, Jose}
+\index{Futatsugi, Kokichi}
+\index{Jouannaud, Jean-Pierre}
+\begin{chunk}{axiom.bib}
+@techreport{Gogu92,
+  author = "Goguen, Joseph A. and Winkler, Timothy and Meseguer, Jose and
+            Futatsugi, Kokichi and Jouannaud, Jean-Pierre",
+  title = "Introducing OBJ",
+  institution = "SRI International",
+  number = "SRI-CSL-92-03",
+  year = "1992",
+  abstract =
+  "This is an introduction to OBJ, describing its philosophy, its
+  syntax, and aspects of its semantics, both logical and operational,
+  with many examples, based on Release 2.0 of OBJ3. OBJ is a wide
+  spectrum first-order functional language that is rigorously based upon
+  equational logic. This semantic basis supports a declarative,
+  specificational style, facilitates program verification, and allows
+  OBJ to be used as a theorem prover. OBJ3 is based upon order-sorted
+  equational logic, which provides a notion of subsort that rigorously
+  supports multiple inheritance, exception handling and
+  overloading. OBJ3 also provides parameterized programming, a technique
+  which provides powerful support for design, verification, reuse, and
+  maintenance.
+
+  This facility is based on using two kinds of module: objects to
+  encapsulate executable code, and in particular to define abstract data
+  types by initial algebra semantics; and theories to specify both
+  syntactic structure and semantic properties for modules and module
+  interfaces. Each kind of module can be parameterized, where actual
+  parameters are modules. For parameter instantiation, a view binds the
+  formal entities in an interface theory to actual entities in a module,
+  and also asserts that the target module satisfies the semantic
+  requirements of the interface theory. Module expressions allow complex
+  combinations of already defined modules, including sums,
+  instantiations, and transformations; moreover, evaluating a module
+  expression actually constructs the described software (sub)system from
+  the given components.
+
+  Default views can greatly reduce the effort of instantiating
+  modules. We argue that first-order parameterized programming includes
+  much of the power of higher-order programming. Although OBJ executable
+  code normally consists of equations that are interpreted as rewrite
+  rules, OBJ objects can also encapsulate Lisp code, e.g., to provide
+  efficient built-in data types, or to augment the system with new
+  capabilities; we describe the syntax of the facility, and provide some
+  examples. In addition, OBJ provides rewriting modulo associative,
+  commutative and/or identity equations, as well as user-definable
+  evaluation strategies that allow lazy, eager, and mixed evaluation
+  strategies on an operation-by-operation basis; memoization [sic] is
+  also available on an operation-by-operation basis. Finally, OBJ
+  provides user-definable mixfix syntax, which supports using the
+  notational conventions of particular application domains.",
+  paper = "Gogu92.pdf"
+}
+
+\end{chunk}
+
+\index{Limongelli, C.}
+\index{Temperini, M.}
+\begin{chunk}{axiom.bib}
+@article{Limo92,
+  author  = {Limongelli, C. and Temperini, M.},
+  title   = {Abstract Specification of Structures and Methods in Symbolic
+  Mathematical Computation},
+  journal = {Theoretical Computer Science},
+  volume  = {104},
+  year    = {1992},
+  pages   = {89-107},
+  abstract =
+  {This paper describes a methodology based on the object-oriented
+  programming paradigm, to support the design and implementation of a
+  symbolic computation system. The requirements of the system are
+  related to the specification and treatment of mathematical
+  structures. This treatment is considered from both the numerical and
+  the symbolic points of view. The resulting programming system should
+  be able to support the formal definition of mathematical data
+  structures and methods at their highest level of abstraction, to
+  perform computations on instances created from such definitions, and
+  to handle abstract data structures through the manipulation of their
+  logical properties. Particular consideration is given to the
+  correctness aspects. Some examples of convenient application of the
+  proposed design methodology are presented.},
+  paper   = {Limo92.pdf}
+}
+
+\end{chunk}
+
+\index{Breazu-Tannen, V.}
+\index{Coquand, T.}
+\index{Gunter, C.A.}
+\index{Scedrov, A.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Brea89,
+  author    = {Breazu-Tannen, V. and Coquand, T. and Gunter, C.A. and
+  Scedrov, A.},
+  title     = {Inheritance and Explicit Coercion},
+  booktitle = {Logic in Computer Science},
+  year      = {1989},
+  isbn      = {0-8186-1954-6},
+  abstract =
+  {A method is presented for providing semantic interpretations for
+  languages which feature inheritance in the framework of statically
+  checked, rich type disciplines. The approach is illustrated by an
+  extension of the language Fun of L. Cardelli and P. Wegner (1985),
+  which is interpreted via a translation into an extended polymorphic
+  lambda calculus. The approach interprets inheritances in Fun as
+  coercion functions already definable in the target of the
+  translation. Existing techniques in the theory of semantic domains can
+  then be used to interpret the extended polymorphic lambda calculus,
+  thus providing many models for the original language. The method
+  allows the simultaneous modeling of parametric polymorphism, recursive
+  types, and inheritance, which has been regarded as problematic because
+  of the seemingly contradictory characteristics of inheritance and type
+  recursion on higher types. The main difficulty in providing
+  interpretations for explicit type disciplines featuring inheritance is
+  identified. Since interpretations follow the type-checking
+  derivations, coherence theorems are required, and the authors prove
+  them for their semantic method.},
+  paper     = {Brea89.pdf}
+}
+
+\end{chunk}
+
+\index{Breazu-Tannen, Val}
+\index{Coquand, Thierry}
+\index{Gunter, Carl A.}
+\index{Scedrov, Andre}
+\begin{chunk}{axiom.bib}
+@article{Brea91,
+  author = "Breazu-Tannen, Val and Coquand, Thierry and Gunter, Carl A. and
+            Scedrov, Andre",
+  title = "Inheritance as Implicit Coercion",
+  journal = "Information and Computation",
+  volume = "93",
+  number = "1",
+  year = "1991",
+  pages = "172-221",
+  abstract =
+  "We present a method for providing semantic interpretations for
+  languages with a type system featuring inheritance polymorphism. Our
+  approach is illustrated on an extension of the language Fun of
+  Cardelli and Wegner, which we interpret via a translation into an
+  extended polymorphic lambda calculus. Our goal is to interpret
+  inheritances in Fun via coercion functions which are definable in the
+  target of the translation. Existing techniques in the theory of
+  semantic domains can be then used to interpret the extended
+  polymorphic lambda calculus, thus providing many models for the
+  original language. This technique makes it possible to model a rich
+  type discipline which includes parametric polymorphism and recursive
+  types as well as inheritance. A central difficulty in providing
+  interpretations for explicit type disciplines featuring inheritance in
+  the sense discussed in this paper arises from the fact that programs
+  can type-check in more than one way. Since interpretations follow the
+  type-checking derivations, coherence theorems are required: that is,
+  one must prove that the meaning of a program does not depend on the
+  way it was type-checked. Proofs of such theorems for our proposed
+  interpretation are the basic technical results of this
+  paper. Interestingly, proving coherence in the presence of recursive
+  types, variants, and abstract types forced us to reexamine fundamental
+  equational properties that arise in proof theory (in the form of
+  commutative reductions) and domain theory (in the form of strict
+  vs. non-strict functions).",
+  paper = "Brea91.pdf"
+}
+
+\end{chunk}
+
+\index{Smolka, G.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Smol89a,
+  author = "Smolka, G.",
+  title = "Logic Programming over Polymorphically Order-Sorted Types",
+  school = "Fachbereich Informatik, Universitat Kaiserslautern",
+  year = "1989"
+}
+
+\end{chunk}
+
+\index{Wirsing, Martin}
+\index{Broy, Manfred}
+\begin{chunk}{axiom.bib}
+@inproceedings{Wirs82,
+  author = "Wirsing, Martin and Broy, Manfred",
+  title = "An Analysis of Semantic Models for Algebraic Specifications",
+  booktitle = "Theoretical Foundations of Programming Methodology",
+  year = "1982",
+  publisher = "Springer",
+  pages = "351-413",
+  isbn = "978-94-009-7893-5",
+  abstract =
+  "Data structures, algorithms and programming languages can be
+  described in a uniform implementation-independent way by axiomatic
+  abstract data types i.e. by algebraic specifications defining
+  abstractly the properties of objects and functions. Different semantic
+  models such as initial and terminal algebras have been proposed in
+  order to specify the meaning of such specifications -often involving a
+  considerable amount of category theory. A more concrete semantics
+  encompassing these different approaches is presented:
+
+  Abstract data types are specified in hierarchies, employing
+  ``primitive'' types on which other types are based. The semantics is
+  defined to be the class of all partial heterogeneous algebras
+  satisfying the axioms and respecting the hierarchy. The interpretation
+  of a specification as its initial or terminal algebra is just a
+  constraint on the underlying data. These constraints can be modified
+  according to the specification goals. E.g. the data can be specified
+  using total functions; for algorithms partial functions with
+  syntactically checkable domains seem appropriate whereas for
+  programming languages the general notion of partiality is needed,
+  Model-theoretic and deduction-oriented conditions are developed which
+  ensure properties leading to criteria for the soundness and complexity
+  of specifications. These conditions are generalized to parameterized
+  types, i.e. type procedures mapping types into types. Syntax and
+  different semantics of parameter are defined and discussed. Criteria
+  for proper parameterized specifications are developed. It is shown
+  that the properties of proper specifications viz. of snowballing and
+  impeccable types are preserved under application of parameterized
+  types — finally guaranteeing that the composition of proper small
+  specifications always leads to a proper large specification."
+}
+\end{chunk}
+
+\begin{chunk}{axiom.bib}
+@misc{GAPx17,
+  author = "{The GAP Group}",
+  title = "GAP - Reference Manual",
+  year = "2017",
+  link = "\url{https://www.gap-system.org/Manuals/doc/ref/manual.pdf}"
+}
+
+\end{chunk}
+
+\index{Char, Bruce}
+\index{Geddes, Keith O.}
+\index{Gonnet, Gaston H.}
+\index{Leong, Benton}
+\index{Monagan, Michael B.}
+\index{Watt, Stephen M.}
+\begin{chunk}{axiom.bib}
+@book{Char91a,
+  author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
+            Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
+  title = "Maple V Library Reference Manual",
+  publisher = "Springer",
+  year = "1991",
+  isbn = "978-1-4757-2133-1",
+  abstract =
+  "The design and implementation of the Maple system is an on-going
+  project of the Symbolic Computation Group at the University of
+  Waterloo in Ontario, Canada. This manual corresponds with version V
+  (roman numeral five) of the Maple system. The on-line help subsystem
+  can be invoked from within a Maple session to view documentation on
+  specific topics. In particular, the command ?updates points the user
+  to documentation updates for each new version of Maple. The Maple
+  project was first conceived in the autumn of 1980, growing out of
+  discussions on the state of symbolic computation at the University of
+  Waterloo. The authors wish to acknowledge many fruitful discussions
+  with colleagues at the University of Waterloo, particularly Morven
+  Gentleman, Michael Malcolm, and Frank Tompa. It was recognized in
+  these discussions that none of the locally-available systems for
+  symbolic computation provided the facilities that should be expected
+  for symbolic computation in modern computing environments. We
+  concluded that since the basic design decisions for the then-current
+  symbolic systems such as ALTRAN, CAMAL, REDUCE, and MACSYMA were based
+  on 1960's computing technology, it would be wise to design a new
+  system ``from scratch''. Thus we could take advantage of the software
+  engineering technology which had become available in recent years, as
+  well as drawing from the lessons of experience. Maple's basic features
+  (elementary data structures, input/output, arithmetic with numbers,
+  and elementary simplification) are coded in a systems programming
+  language for efficiency."
+}
+
+\end{chunk}
+
+\index{Monk, J. Donald}
+\begin{chunk}{axiom.bib}
+@book{Monk76,
+  author    = {Monk, J. Donald},
+  title     = {Mathematical Logic},
+  publisher = {Springer},
+  year      = {1976},
+  isbn      = {978-1-4684-9452-5}
+}
+
+\end{chunk}
+
+\index{Meyer, Albert R.}
+\index{Reinhold, Mark B.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Meye86,
+  author = "Meyer, Albert R. and Reinhold, Mark B.",
+  title = "``Type'' is not a type",
+  booktitle = "POPL 86",
+  year = "1986",
+  pages = "287-295",
+  abstract =
+  "A function has a dependent type when the type of its result
+  depends upon the value of its argument. Dependent types originated in
+  the type theory of intuitionistic mathematics and have reappeared
+  independently in programming languages such as CLU, Pebble, and
+  Russell. Some of these languages make the assumption that there exists
+  a type-of-all-types which is its own type as well as the type
+  of all other types. Girard proved that this approach is inconsistent
+  from the perspective of intuitionistic logic. We apply Girard's
+  techniques to establish that the type-of-all-types assumption creates
+  serious pathologies from a programming perspective: a system using
+  this assumption is inherently not normalizing, term equality is
+  undecidable, and the resulting theory fails to be a conservative
+  extension of the theory of the underlying base types. The failure of
+  conservative extension means that classical reasoning about programs
+  in such a system is not sound."
+}
+
+\end{chunk}
+
+\index{Howe, Douglas J.}
+\begin{chunk}{axiom.bib}
+@techreport{Howe87,
+  author = "Howe, Douglas J.",
+  title = "The Computational Behaviour of Girard's Paradox",
+  institution = "Cornell University",
+  year = "1987",
+  link = "\url{https://ecommons.cornell.edu/handle/1813/6660}",
+  number = "TR 87-820",
+  abstract =
+  "In their paper ``Type'' Is Not a Type, Meyer and Reinhold argued that
+  serious pathologies can result when a type of all types is added to a
+  programing language with dependent types. Central to their argument is
+  the claim that by following the proof of Girard's paradox it is
+  possible to construct in their calculus $\lambda^{\tau \tau}$ a term
+  having a fixed-point property. Because of the tremendous amount of
+  formal detail involved, they were unable to establish this claim. We
+  have made use of the Nuprl proof development system in constructing a
+  formal proof of Girard's paradox and analysing the resulting term. We
+  can show that the term does not have the desired fixed-point property,
+  but does have a weaker form of it that is sufficient to establish some
+  of the results of Meyer and Reinhold. We believe that the method used
+  here is in itself of some interest, representing a new kind of
+  application of a computer to a problem in symbolic logic."
+}
+
+\end{chunk}
+
+\index{Coquand, T.}
+\begin{chunk}{axiom.bib}
+@techreport{Coqu86,
+  author = "Coquand, Thierry",
+  title = "An Analysis of Girard's Paradox",
+  institution = "Institut National de Recherche en Informatique et en
+  Automatique",
+  year = "1986",
+  abstract =
+  "We study the consistency of a few formal systems, specially some
+  extensions of Church's calculus and the construction system. We show
+  that Church's calculus is not compatible with the notion of
+  second-order type. We apply this result for showing that the calculus
+  of construction with four levels is inconsistent. We suggest finally
+  some consistent extensions of these two calculi.",
+  paper = "Coqu86.pdf"
+}
+
+\end{chunk}
+
+\index{Cardelli, Luca}
+\begin{chunk}{axiom.bib}
+@article{Card88,
+  author  = {Cardelli, Luca},
+  title   = {A Semantics of Multiple Inheritance},
+  journal = {Information and Computation},
+  volume  = {76},
+  number  = {2-3},
+  year    = {1988},
+  pages   = {138-164},
+  paper   = {Card88.pdf}
+}
+
+\end{chunk}
+
+\index{Godel, Kurt}
+\begin{chunk}{axiom.bib}
+@article{Gode58,
+  author = "Godel, Kurt",
+  title = {\"Uber eine bisher noch nicht benutzte Erweiterung des Finiten
+  Standpunktes},
+  journal = "Dialectica",
+  volume = "12",
+  year = "1958",
+  pages = "280-287"
+}
+
+\end{chunk}
+
+\index{Girard, Jean-Yves}
+\index{Taylor, Paul}
+\index{Lafont, Yves}
+\begin{chunk}{axiom.bib}
+@book{Gira89,
+  author = "Girard, Jean-Yves and Taylor, Paul and Lafont, Yves",
+  title = "Proofs and Types",
+  publisher = "Cambridge University Press",
+  year = "1989"
+}
+
+\end{chunk}
+
+\index{Pierce, Benjamin C.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Pier91,
+  author = "Pierce, Benjamin C.",
+  title = "Programming with Intersection Types and Bounded Polymorphism",
+  school = "Carnegie Mellon University",
+  year = "1991",
+  comment = "CMU-CS-91-205",
+  abstract =
+  "Intersection types and bounded quantification are complementary
+  mechanisms for extending the expressive power of statically typed
+  programming languages. They begin with a common framework: a simple,
+  typed language with higher-order functions and a notion of subtyping.
+  Intersection types extend this framework by giving every pair of types
+  $\sigma$ and $\tau$ a greatest lower bound, $\sigma \land \tau$,
+  corresponding intuitively to the intersection of the sets of values
+  described by $\sigma$ and $\tau$. Bounded quantification extends the
+  basic framework along a different axis by adding polymorphic functions
+  that operate uniformly on all the subtypes of a given type. This thesis
+  unifies and extends prior work on intersection types and bounded
+  quantification, previously studied only in isolation, by investigating
+  theoretical and practical aspects of a typed $\lambda$-calculus
+  incorporating both.
+
+  The practical utility of this calculus, called $F_\land$ is
+  established by examples showing, for instance, that it allows a rich
+  form of ``coherent overloading'' and supports an analog of abstract
+  interpretation during typechecking; for example, the addition function
+  is given a type showing that it maps pairs of positive inputs to a
+  positive result, pairs of zero inputs to a zero result, etc. More
+  familiar programming examples are presented in terms of an extension
+  of Forsythe (an Algol-like language with intersection types),
+  demonstrating how parametric polymorphism can be used to simplify and
+  generalize Forsythe's design. We discuss the novel programming and
+  debugging styles that arise in $F_\land$.
+
+  We prove the correctness of a simple semi-decision procedure for the
+  subtype relation and the partial correctness of an algorithm for
+  synthesizing minimal types of $F_\land$ terms. Our main tool in this
+  analysis is a notion of ``canonical types,'' which allows proofs to be
+  factored so that intersections are handled separately from the other
+  type constructors.
+
+  A pair of negative results illustrates some subtle complexities of
+  $F_\land$. First, the subtype relation of $F_\land$ is shown to be
+  undecidable; in fact, even the subtype relation of pure second-order
+  bounded quantification is undecidable, a surprising result in its own
+  right. Second, the failure of an important technical property of the
+  subtype relation -- the existence of least upper bounds -- indicates
+  that typed semantic models of $F_\land$ will be more difficult to
+  construct and analyze than the known typed models of intersection
+  types. We propose, for future study, some simpler fragments of
+  $F_\land$ that share most of its essential features, while recovering
+  decidability and least upper bounds.
+
+  We study the semantics of $F_\land$ from several points of view. An
+  untyped model based on partial equivalence relations demonstrates the
+  consistency of the typing rules and provides a simple interpretation
+  for programs, where ``$\sigma$ is a subtype of $\tau$'' is read as
+  ``$\sigma$ is a subset of $\tau$.'' More refined models can be
+  obtained using a translation from $F_\land$ into the pure polymorphic
+  $\lambda$-calculus; in these models, ``$\sigma$ is a subtype of
+  $\tau$'' is interpreted by an explicit coercion function from $\sigma$
+  to $\tau$. The nonexistence of least upper bounds shows up here in
+  the failure of known techniques for proving the coherence of the
+  translation semantics. Finally, an equational theory of equivalences
+  between $F_\land$ terms is presented and its soundness for both styles
+  of model is verified.",
+  paper = "Pier91.pdf"
+}
+
+\end{chunk}
+
+\index{Pierce, Benjamin C.}
+\begin{chunk}{axiom.bib}
+@techreport{Pier91a,
+ author = "Pierce, Benjamin C.",
+ title = "Bounded Quantification is Undecidable",
+ year = "1991",
+ number = "CMU-CS-91-161",
+ link = "\url{http://repository.cmu.edu/cgi/viewcontent.cgi?article=3059}",
+ abstract =
+  "$F_\le$ is a typed $\lambda$-calculus with subtyping and bounded
+ second-order polymorphism. First introduced by Cardelli and Wegner, it
+ has been widely studied as a core calculus for type systems with
+ subtyping.
+
+ Curien and Ghelli proved the partial correctness of a recursive
+ procedure for computing minimal types of $F_\le$ terms and showed
+ that the termination of this procedure is equivalent to the
+ termination of its major component, a procedure for checking the
+ subtype relation between $F_\le$ types. Ghelli later claimed that
+ this procedure is also guaranteed to terminate, but the discovery of a
+ subtle bug in his proof led him recently to observe that, in fact,
+ there are inputs on which the subtyping procedure diverges. This
+ reopens the question of the decidability of subtyping and hence of
+ typechecking.
+
+ This question is settled here in the negative, using a reduction from
+ the halting problem for two-counter Turing machines to show that the
+ subtype relation of $F_\le$ is undecidable.",
+ paper = "Pier91a.pdf"
+}
+
+\end{chunk}
+
+\index{Meyer, Bertrand}
+\begin{chunk}{axiom.bib}
+@book{Meye97,
+ author = "Meyer, Bertrand",
+ title = "Object-Oriented Software Construction",
+ year = "1997",
+ publisher = "Prentice Hall"
+}
+
+\end{chunk}
+
+\index{Goldberg, Adele}
+\index{Robson, David}
+\begin{chunk}{axiom.bib}
+@book{Gold83,
+ author = "Goldberg, Adele and Robson, David",
+ title = "Smalltalk-80: The Language and Its Implementation",
+ publisher = "Addison-Wesley",
+ year = "1983"
+}
+
+\end{chunk}
+
+\index{Kirkerud, Bjorn}
+\begin{chunk}{axiom.bib}
+@book{Kirk89,
+ author = "Kirkerud, Bjorn",
+ title = "Object-Oriented Programming With Simula",
+ year = "1989",
+ series = "International Computer Science Series",
+ publisher = "Addison-Wesley"
+}
+
+\end{chunk}
+
+\index{Birtwistle, Graham M.}
+\begin{chunk}{axiom.bib}
+@book{Birt80,
+ author = "Birtwistle, Graham M.",
+ title = "Simula Begin",
+ year = "1980",
+ publisher = "Chartwell-Bratt",
+ isbn = "9780862380090"
+}
+
+\end{chunk}
+
+\index{Stroustrup, Bjarne}
+\begin{chunk}{axiom.bib}
+@book{Stro95,
+ author = "Stroustrup, Bjarne",
+ title = "The C++ Programming Language (2nd Edition)",
+ publisher = "Addison-Wesley",
+ year = "1995",
+ isbn = "0-201-53992-6"
+}
+
+\end{chunk}
+
+\index{Bruce, Kim B.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Bruc93,
+ author = "Bruce, Kim B.",
+ title = "Safe type checking in a statically-typed object-oriented
+ programming language",
+ booktitle = "POPL 93",
+ year = "1993",
+ isbn = "0-89791-560-7",
+ pages = "285-298",
+ abstract =
+ " In this paper we introduce a statically-typed, functional,
+ object-oriented programming language, TOOPL, which supports classes,
+ objects, methods, instance variable, subtypes, and inheritance. It has
+ proved to be surprisingly difficult to design statically-typed
+ object-oriented languages which are nearly as expressive as Smalltalk
+ and yet have no holes in their typing systems. A particular problem
+ with statically type checking object-oriented languages is determining
+ whether a method provided in a superclass will continue to type check
+ when inherited in a subclass. This problem is solved in our language
+ by providing type checking rules which guarantee that a method which
+ type checks as part of a class will type check correctly in all legal
+ subclasses in which it is inherited. This feature enables library
+ providers to provide only the interfaces of classes with executables
+ and still allow users to safely create subclasses. The design of TOOPL
+ has been guided by an analysis of the semantics of the language, which
+ is given in terms of a sufficiently rich model of the F-bounded
+ second-order lambda calculus. This semantics supported the language
+ design by providing a means of proving that the type-checking rules
+ for the language are sound, ensuring that well-typed terms produce
+ objects of the appropriate type. In particular, in a well-typed
+ program it is impossible to send a message to an object which lacks a
+ corresponding method.",
+ paper = "Bruc93.pdf"
+}
+
+\end{chunk}
+
+\index{Abdali, S. Kamal}
+\index{Cherry, Guy W.}
+\index{Soiffer, Neil}
+\begin{chunk}{axiom.bib}
+@inproceedings{Abda86,
+ author = "Abdali, S. Kamal and Cherry, Guy W. and Soiffer, Neil",
+ title = "A Smalltalk System for Algebraic Manipulation",
+ booktitle = "OOPSLA 86",
+ pages = "277-293",
+ year = "1986",
+ abstract =
+ "This paper describes the design of an algebra system Views
+ implemented in Smalltalk. Views contains facilities for dynamic
+ creation and manipulation of computational domains, for viewing these
+ domains as various categories such as groups, rings, or fields, and
+ for expressing algorithms generically at the level of categories. The
+ design of Views has resulted in the addition of some new abstractions
+ to Smalltalk that are quite useful in their own right. Parameterized
+ classes provide a means for run-time creation of new classes that
+ exhibit generally very similar behavior, differing only in minor ways
+ that can be described by different instantiations of certain
+ parameters. Categories allow the abstraction of the common behavior of
+ classes that derives from the class objects and operations satisfying
+ certain laws independently of the implementation of those objects and
+ operations. Views allow the run-time association of classes with
+ categories (and of categories with other categories), facilitating the
+ use of code written for categories with quite different
+ interpretations of operations. Together, categories and views provide
+ an additional mechanism for code sharing that is richer than both
+ single and multiple inheritance. The paper gives algebraic as well as
+ non-algebraic examples of the above-mentioned features.",
+ paper = "Abda86.pdf",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Berger, Emery}
+\begin{chunk}{axiom.bib}
+@techreport{Berg92,
+ author = "Berger, Emery",
+ title = "FP + OOP = Haskell",
+ institution = "University of Texas",
+ number = "TR-92-30",
+ abstract =
+ "The programming language Haskell adds object-oriented functionality
+ (using a concept known as type classes) to a pure functional
+ programming framework. This paper describes these extensions and
+ analyzes its accomplishments as well as some problems."
+}
+
+\end{chunk}
+
+\index{Barendregt, H. P.}
+\begin{chunk}{axiom.bib}
+@book{Bare84,
+ author = "Barendregt, H. P.",
+ title = "The Lambda Calculus: Its Syntax and Semantics",
+ publisher = "Elsevier Science",
+ year = "1984"
+}
+
+\end{chunk}
+
+\index{Goguen, Joseph}
+\index{Meseguer, Jose}
+\begin{chunk}{axiom.bib}
+@article{Gogu92,
+ author = "Goguen, Joseph and Meseguer, Jose",
+ title = "Order-sorted Algebra I : Equational Deduction for Multiple
+ Inheritance, Overloading, Exceptions, and Partial Operations",
+ journal = "Theoretical Computer Science",
+ volume = "105",
+  number = "2",
+ year = "1992",
+ pages = "217-273",
+ abstract =
+ "This paper generalizes many-sorted algebra (MSA) to order-sorted
+ algebra (OSA) by allowing a partial ordering relation on the set of
+ sorts. This supports abstract data types with multiple inheritance (in
+ roughly the sense of object-oriented programming), several forms of
+ polymorphism and overloading, partial operations (as total on
+ equationally defined subsorts), exception handling, and an operational
+ semantics based on term rewriting. We give the basic algebraic
+ constructions for OSA, including quotient, image, product and term
+ algebra, and we prove their basic properties, including quotient,
+ homomorphism, and initiality theorems. The paper's major mathematical
+ results include a notion of OSA deduction, a completeness theorem for
+ it, and an OSA Birkhoff variety theorem. We also develop conditional
+ OSA, including initiality, completeness, and McKinsey-Malcev
+ quasivariety theorems, and we reduce OSA to (conditional) MSA, which
+ allows lifting many known MSA results to OSA. Retracts, which
+ intuitively are left inverses to subsort inclusions, provide
+ relatively inexpensive run-time error handling. We show that it is
+ safe to add retracts to any OSA signature, in the sense that it gives
+ rise to a conservative extension. A final section compares and
+ contrasts many different approaches to OSA. This paper also includes
+ several examples demonstrating the flexibility and applicability of
+ OSA, including some standard benchmarks like stack and list, as well
+ as a much more substantial example, the number hierarchy from the
+ naturals up to the quaternions.",
+ paper = "Gogu92.pdf"
+}
+
+\end{chunk}
+
+\index{Cardelli, Luca}
+\begin{chunk}{axiom.bib}
+@inproceedings{Card86,
+ author = "Cardelli, Luca",
+ title = "Typechecking Dependent Types and Subtypes",
+ link =
+ "\url{http://lucacardelli.name/Papers/Dependent%20Typechecking.US.pdf}",
+  year = "1986",
+ journal = "LNCS",
+ volume = "523",
+  pages = "45-57",
+ paper = "Card86.pdf"
+}
+
+\end{chunk}
+
+\index{Zariski, Oscar}
+\index{Samuel, Pierre}
+\begin{chunk}{axiom.bib}
+@book{Zari75,
+ author = "Zariski, Oscar and Samuel, Pierre",
+ title = "Commutative Algebra",
+  series = "Graduate Texts in Mathematics",
+ year = "1975",
+ publisher = "Springer-Verlag",
+ isbn = "978-0387900896"
+}
+
+\end{chunk}
+
+\index{Marcus, Daniel A.}
+\begin{chunk}{axiom.bib}
+@book{Marc77,
+ author = "Marcus, Daniel A.",
+ title = "Number Fields",
+ publisher = "Springer",
+ year = "1977",
+ isbn = "978-0387902791"
+}
+
+\end{chunk}
+
+\index{Lang, Serge}
+\begin{chunk}{axiom.bib}
+@book{Lang05,
+ author = "Lang, Serge",
+ title = "Algebra",
+ publisher = "Springer",
+ year = "2005",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-0387953854"
+}
+
+\end{chunk}
+
+\index{Fuh, You-Chin}
+\index{Mishra, Prateek}
+\begin{chunk}{axiom.bib}
+@article{Fuhx89,
+ author = "Fuh, You-Chin and Mishra, Prateek",
+ title = "Polymorphic Subtype Inference -- Closing the Theory-Practice Gap",
+ journal = "Lecture Notes in Computer Science",
+ volume = "352",
+ year = "1989",
+ pages = "167-183",
+ paper = "Fuhx89.pdf"
+}
+
+\end{chunk}
+
+\index{Kaes, Stefan}
+\begin{chunk}{axiom.bib}
+@article{Kaes92,
+ author = "Kaes, Stefan",
+ title = "Type Inference in the Presence of Overloading, Subtyping, and
+ Recursive Types",
+ journal = "LISP Pointers",
+ volume = "V",
+ number = "1",
+  pages = "193-204",
+ year = "1992",
+ paper = "Kaes92.pdf"
+}
+
+\end{chunk}
+
+\index{Robinson, J. S. Derek}
+\begin{chunk}{axiom.bib}
+@book{Robi96,
+ author = "Robinson, J. S. Derek",
+ title = "A Course in the Theory of Groups",
+ year = "1996",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-1-4612-6443-9",
+ publisher = "Springer"
+}
+
+\end{chunk}
+
+\index{Thatte, Satish R.}
+\begin{chunk}{axiom.bib}
+@article{That91,
+ author = "Thatte, Satish R.",
+ title = "Coercive Type Isomorphism",
+ journal = "LNCS",
+ volume = "523",
+ year = "1991",
+ pages = "29-49",
+ abstract =
+ "There is a variety of situations in programming in which it is useful
+ to think of two distinct types as representations of the same abstract
+ structure. However, language features which allow such relations to
+ be effectively expressed at an abstract level are lacking. We propose
+ a generalization of ML-style type inference to deal effectively with
+ this problem. Under the generalization, the (normally free) algebra
+ of type expressions is subjected to an equational theory generated by
+ a finite set of user-specified equations that express
+ interconvertibility relations between objects of ``equivalent'' types.
+ Each type equation is accompanied by a pair of conversion functions
+ that are (at least partial) inverses. We show that so long as the
+ equational theory satisfies a reasonably permissive syntactic
+ constraint, the resulting type system admits a complete type infer-
+ ence algorithm that produces unique principal types. The main
+ innovation required in type inference is the replacement of ordinary
+ free unification by unification in the user-specified equational
+ theory. The syntactic constraint ensures that the latter is unitary,
+ i.e., yields unique most general unifiers. The proposed constraint is
+ of independent interest as the first known syntactic
+ characterization for a class of unitary theories. Some of the
+ applications of the system are similar to those of Wadler's views
+ [Wad87]. However, our system is considerably more general, and more
+ orthogonal to the underlying language.",
+ paper = "That91.pdf"
+}
+
+\end{chunk}
+
+\index{Bundgen, Reinhard}
+\begin{chunk}{axiom.bib}
+@book{Bund93,
+ author = "Bundgen, Reinhard",
+ title = "The ReDuX System Documentation",
+ year = "1993",
+ publisher = "WSI"
+}
+
+\end{chunk}
+
+\index{Bundgen, Reinhard}
+\begin{chunk}{axiom.bib}
+@inproceedings{Bund93a,
+ author = "Bundgen, Reinhard",
+  title = {Reduce the Redex $\rightarrow$ ReDuX},
+ booktitle = "Proc. Rewriting Techniques and Applications 93",
+ year = "1993",
+ pages = "446-450",
+ publisher = "Springer-Verlag",
+ isbn = "3-540-56868-9"
+}
+
+\end{chunk}
+
+\index{Cohn, P. M.}
+\begin{chunk}{axiom.bib}
+@book{Cohn91,
+ author = "Cohn, P. M.",
+ title = "Algebra",
+ publisher = "John Wiley and Sons",
+ year = "1991",
+ isbn = "0471101680",
+  paper = "Cohn91.pdf"
+}
+
+\end{chunk}
+
+\index{Jouannaud, Jean-Pierre}
+\index{Kirchner, Claude}
+\begin{chunk}{axiom.bib}
+@book{Joua90,
+ author = "Jouannaud, Jean-Pierre and Kirchner, Claude",
+ title = "Solving Equations in Abstract Algebras: A Rule-based Survey of
+ Unification",
+ year = "1990",
+  publisher = "Universite de Paris-Sud"
+}
+
+\end{chunk}
+
+\index{Kowalsky, Hans Joachim}
+\begin{chunk}{axiom.bib}
+@book{Kowa63,
+ author = "Kowalsky, Hans Joachim",
+ title = "Linear Algebra",
+ year = "1963",
+ publisher = "Walter de Gruyter",
+ comment = "(German)"
+}
+
+\end{chunk}
+
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Reyn80,
+ author = "Reynolds, John C.",
+ title = "Using Category Theory to Design Implicit Conversions and
+ Generic Operators",
+ booktitle = "Lecture Notes in Computer Science",
+ year = "1980",
+ abstract =
+ "A generalization of many-sorted algebras, called category-sorted
+ algebras, is defined and applied to the language-design problem of
+ avoiding anomalies in the interaction of implicit conversions and
+ generic operators. The definition of a simple imperative language
+ (without any binding mechanisms) is used as an example.",
+ paper = "Reyn80.pdf"
+}
+
+\end{chunk}
+
+
+\index{Stansifer, R.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Stan88,
+ author = "Stansifer, R.",
+ title = "Type Inference with Subtypes",
+ booktitle = "POPL 88",
+  pages = "88-97",
+ year = "1988",
+ abstract =
+ "We give an algorithm for type inference in a language with functions,
+ records, and variant records. A similar language was studied by
+ Cardelli who gave a type checking algorithm. This language is
+ interesting because it captures aspects of object-oriented programming
+ using subtype polymorphism. We give a type system for deriving types
+ of expressions in the language and prove the type inference algorithm
+ is sound, i.e., it returns a type derivable from the proof system. We
+ also prove that the type the algorithm finds is a ``principal'' type,
+ i.e., one which characterizes all others. The approach taken here is
+ due to Milner for universal polymorphism. The result is a synthesis of
+ subtype polymorphism and universal polymorphism.",
+ paper = "Stan88.pdf"
+}
+
+\end{chunk}
+
+\index{Huet, Gerard}
+\index{Oppen, Derek C.}
+\begin{chunk}{axiom.bib}
+@techreport{Huet80,
+ author = "Huet, Gerard and Oppen, Derek C.",
+ title = "Equations and Rewrite Rules: A Survey",
+ institution = "Stanford Verification Group",
+ number = "STAN-CS-80-785",
+ year = "1980",
+ abstract =
+ "Equations occur frequently in mathematics, logic and computer
+ science. in this paper, we survey the main results concerning
+ equations, and the methods available for reasoning about them and
+ computing with them. The survey is self-contained and unified, using
+ traditional abstract algebra.
+
+ Reasoning about equations may involve deciding if an equation follows
+ from a given set of equations (axioms), or if an equation is true in a
+ given theory. When used in this manner, equations state properties
+ that hold between objects. Equations may also be used as definitions;
+ this use is well known in computer science: programs written in
+ applicative languages, abstract interpreter definitions, and algebraic
+ data type definitions are clearly of this nature. When these
+ equations are regarded as oriented ``rewrite rules'', we may actually
+ use them to compute.
+
+ In addition to covering these topics, we discuss the problem of
+ ``solving'' equations (the ``unification'' problem), the problem of
+ proving termination of sets of rewrite rules, and the decidability and
+ complexity of word problems and of combinations of equational
+ theories. We restrict ourselves to first-order equations, and do not
+ treat equations which define non-terminating computations or recent
+ work on rewrite rules applied to equational congruence classes.",
+ paper = "Huet80.pdf"
+}
+
+\end{chunk}
+
+\index{Remy, Didier}
+\begin{chunk}{axiom.bib}
+@inproceedings{Remy89,
+ author = "Remy, Didier",
+ title = "Typechecking Records and Variants in a Natural Extension of ML",
+ booktitle = "POPL 89",
+ isbn = "978-0-89791-294-5",
+ publisher = "ACM",
+ link = "\url{https://www.cs.cmu.edu/~aldrich/courses/819/row.pdf}",
+ abstract =
+ "We describe an extension of ML with records where inheritance is
+ given by ML generic polymorphism. All common operations on records but
+ concatenation are supported, in particular, the free extension of
+ records. Other operations such as renaming of fields are added. The
+ solution relies on an extension of ML, where the language of types is
+ sorted and considered modulo equations, and on a record extension of
+ types. The solution is simple and modular and the type inference
+ algorithm is efficient in practice.",
+ paper = "Remy89.pdf"
+}
+
+\end{chunk}
+
+\index{Wand, Mitchell}
+\index{O'Keefe, Patrick}
+\begin{chunk}{axiom.bib}
+@inproceedings{Wand89,
+ author = "Wand, Mitchell and O'Keefe, Patrick",
+ title = "On the Complexity of Type Inference with Coercion",
+ booktitle = "PFCA 89",
+ pages = "293-298",
+ isbn = "0-89791-328-0",
+ abstract =
+ "We consider the following problem: Given a partial order $(C,\le)$ of
+ base types and coercions between them, a set of constants with types
+ generated from $C$, and a term $M$ in the lambda calculus with these
+ constants, does $M$ have a typing with this set of types? This
+ problem abstracts the problem of typability over a fixed set of base
+ types and coercions (e.g. int $\le$ real, or a fixed set of coercions
+ between opaque data types). We show that in general, the problem of
+ typability of lambda-terms over a given partially-ordered set of base
+ types is NP-complete. However, if the partial order is known to be a
+ tree, then the satisfiability problem is solvable in (low-order)
+ polynomial time. The latter result is of practical importance, as
+ trees correspond to the coercion structure of single-inheritance
+ object systems.",
+ paper = "Wand89.pdf"
+}
+
+\end{chunk}
+
+\index{Lincoln, Patrick}
+\index{Mitchell, John C.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Linc92,
+ author = "Lincoln, Patrick and Mitchell, John C.",
+ title = "Algorithmic Aspects of Type Inference with Subtypes",
+ booktitle = "POPL 92",
+ pages = "293-304",
+ year = "1992",
+ abstract =
+ "We study the complexity of type inference for programming languages
+ with subtypes. There are three language variations that effect the
+ problem: (i) basic functions may have polymorphic or more limited
+ types, (ii) the subtype hierarchy may be fixed or vary as a result of
+ subtype declarations within a program, and (iii) the subtype hierarchy
+ may be an arbitrary partial order or may have a more restricted form,
+ such as a tree or lattice. The naive algorithm for inferring a most
+ general polymorphic type, under variable subtype hypotheses, requires
+ deterministic exponential time. If we fix the subtype ordering, this
+ upper bound grows to nondeterministic exponential time. We show that
+ it is NP-hard to decide whether a lambda term has a type with respect
+ to a fixed subtype hierarchy (involving only atomic type names). This
+ lower bound applies to monomorphic or polymorphic languages. We give
+ PSPACE upper bounds for deciding polymorphic typability if the subtype
+ hierarchy has a lattice structure or the subtype hierarchy varies
+ arbitrarily. We also give a polynomial time algorithm for the limited
+ case where there are no function constants and the type hierarchy
+ is either variable or any fixed lattice.",
+ paper = "Linc92.pdf"
+}
+
+\end{chunk}
+
+\index{Davis, Martin D.}
+\index{Sigal, Ron}
+\index{Weyuker, Elaine J.}
+\begin{chunk}{axiom.bib}
+@book{Davi94,
+ author = "Davis, Martin D. and Sigal, Ron and Weyuker, Elaine J.",
+ title = "Computability, Complexity, and Languages: Fundamentals of
+ Theoretical Computer Science",
+ publisher = "Academic Press",
+ year = "1994",
+ isbn = "978-0122063824"
+}
+
+\end{chunk}
+
+\index{Farmer, William M.}
+\begin{chunk}{axiom.bib}
+@article{Farm90,
+ author = "Farmer, William M.",
+ title = "A Partial Functions Version of Church's Simple Theory of Types",
+ journal = "The Journal of Symbolic Logic",
+ volume = "55",
+ number = "3",
+ year = "1990",
+ pages = "1269-1291",
+ abstract =
+ "Church's simple theory of types is a system of higher-order logic in
+ which functions are assumed to be total. We present in this paper a
+ version of Church's system called PF in which functions may be
+ partial. The semantics of PF, which is based on Henkin's
+ general-models semantics, allows terms to be nondenoting but requires
+ formulas to always denote a standard truth value. We prove that PF is
+ complete with respect to its semantics. The reasoning mechanism in PF
+ for partial functions corresponds closely to mathematical practice,
+ and the formulation of PF adheres tightly to the framework of
+ Church's system.",
+ paper = "Farm90.pdf"
+}
+
+\end{chunk}
+
+\index{Odifreddi, Piergiorgio}
+\begin{chunk}{axiom.bib}
+@book{Odif92,
+ author = "Odifreddi, Piergiorgio",
+ title = "Classical Recursion Theory: The Theory of Functions and Sets of
+ Natural Numbers",
+ publisher = "Elsevier",
+ year = "1992"
+}
+
+\end{chunk}
+
+\index{Buchberger, Bruno}
+\index{Collins, George Edwin}
+\index{Loos, Rudiger}
+\begin{chunk}{axiom.bib}
+@book{Buch82,
+ author = "Buchberger, Bruno and Collins, George Edwin and Loos, Rudiger",
+ title = "Computer Algebra: Symbolic and Algebraic Computation",
+ publisher = "Springer",
+  isbn = "978-3-211-81684-4",
+  year = "1982",
+  paper = "Buch82.pdf"
+}
+
+\end{chunk}
+
+\index{Lauer, M.}
+\begin{chunk}{axiom.bib}
+@incollection{Laue82,
+ author = "Lauer, M.",
+ title = "Computing by Homomorphic Images",
+ booktitle = "Computer Algebra: Symbolic and Algebraic Computation",
+ pages = "139-168",
+ year = "1982",
+ publisher = "Springer",
+ isbn = "978-3-211-81684-4",
+ abstract =
+ "After explaining the general technique of Computing by homomorphic
+ images, the Chinese remainder algorithm and the Hensel lifting
+ construction are treated extensively. Chinese remaindering is first
+ presented in an abstract setting. Then the specialization to Euclidean
+ domains, in particular $\mathbb{Z}$, $\mathbb{K}[y]$, and
+ $\mathbb{Z}[y_1,\ldots,y_n]$ is considered. For both techniques,
+ Chinese remaindering as well as the lifting algorithms, a complete
+ computational example is presented and the most frequent application
+ is discussed."
+}
+
+\end{chunk}
+
+\index{Huet, Gerard}
+\index{Plotkin, G.}
+\begin{chunk}{axiom.bib}
+@book{Huet91,
+ author = "Huet, Gerard and Plotkin, G.",
+ title = "Logical Frameworks",
+ publisher = "Cambridge University",
+ year = "1991"
+}
+
+\end{chunk}
+
+\index{Harper, Robert}
+\index{Honsell, Furio}
+\index{Plotkin, Gordon}
+\begin{chunk}{axiom.bib}
+@article{Harp93,
+ author = "Harper, Robert and Honsell, Furio and Plotkin, Gordon",
+ title = "A Framework for Defining Logics",
+ journal = "J. ACM",
+ volume = "40",
+ number = "1",
+ year = "1993",
+ pages = "143-184",
+ abstract =
+ "The Edinburgh Logical Framework (LF) provides a means to define (or
+ present) logics. It is based on a general treatment of syntax, rules,
+ and proofs by means of a typed $\lambda$-calculus with dependent
+ types. Syntax is treated in a style similar to, but more general than,
+ Martin-L{\"o}f's system of arities. The treatment of rules and proofs
+ focuses on his notion of a judgment. Logics are represented in LF via
+ a new principle, the judgments as types principle, whereby each
+ judgment is identified with the type of its proofs. This allows for a
+ smooth treatment of discharge and variable occurrence conditions and
+ leads to a uniform treatment of rules and proofs whereby rules are
+ viewed as proofs of higher-order judgments and proof checking is
+ reduced to type checking. The practical benefit of our treatment of
+ formal systems is that logic-independent tools, such as proof editors
+ and proof checkers, can be constructed.",
+ paper = "Harp93.pdf"
+}
+
+\end{chunk}
+
+\index{Pfenning, Frank}
+\begin{chunk}{axiom.bib}
+@inproceedings{Pfen89,
+ author = "Pfenning, Frank",
+ title = "Elf: A Language for Logic Definition and Verified Metaprogramming",
+ booktitle = "Logic in Computer Science 89",
+ year = "1989",
+ pages = "313-322",
+ abstract =
+ "We describe Elf, a metalanguage for proof manipulation environments
+ that are independent of any particular logical system. Elf is
+ intended for meta-programs such as theorem provers, proof
+ transformers, or type inference programs for programming languages
+ with complex type systems. Elf unifies logic definition (in the style
+ of LF, the Edinburgh Logical Framework) with logic programming (in the
+ style of $\lambda$-Prolog). It achieves this unification by giving
+ types an operational interpretation, much the same way that Prolog gives
+ certain formulas (Horn-clauses) an operational interpretation.
+ Novel features of Elf include: (1) the Elf search process
+ automatically constructs terms that can represent object-logic proofs,
+ and thus a program need not construct them explicitly, (2) the partial
+ correctness of meta-programs with respect to a given logic can be
+ expressed and proved in Elf itself, and (3) Elf exploits Elliott’s
+ unification algorithm for a $\lambda$-calculus with dependent types.",
+ paper = "Pfen89.pdf"
+}
+
+\end{chunk}
+
+\index{Pfenning, Frank}
+\begin{chunk}{axiom.bib}
+@inproceedings{Pfen91,
+ author = "Pfenning, Frank",
+ title = "Logic Programming in the LF Logical Framework",
+ booktitle = "Proc. First Workshop on Logical Frameworks",
+ year = "1991",
+ paper = "Pfen91.pdf"
+}
+
+\end{chunk}
+
+\index{Pfenning, Frank}
+\begin{chunk}{axiom.bib}
+@inproceedings{Pfen91a,
+ author = "Pfenning, Frank",
+ title = "Unification and Anti-Unification in the Calculus of Constructions",
+ booktitle = "Logic in Computer Science 91",
+ year = "1991",
+ pages = "74-85",
+ abstract =
+ "We present algorithms for unification and anti- unification in the
+ Calculus of Constructions, where occurrences of free variables (the
+ variables subject to instantiation) are restricted to higher-order
+ patterns, a notion investigated for the simply-typed $\lambda$-calculus
+ by Miller. Most general unifiers and least common anti-instances are
+ shown to exist and are unique up to a simple equivalence. The
+ unification algorithm is used for logic program execution and type and
+ term reconstruction in the current implementation of Elf and has
+ shown itself to be practical. The main application of the
+ anti-unification algorithm we have in mind is that of proof
+ generalization.",
+ paper = "Pfen91a.pdf"
+}
+
+\end{chunk}
diff --git a/src/axiom-website/patches.html b/src/axiom-website/patches.html
index b4dba1d..ec72a72 100644
--- a/src/axiom-website/patches.html
+++ b/src/axiom-website/patches.html
@@ -5760,6 +5760,8 @@ readme update credits list**

books/bookvol10.1 add Tutorial on Quantifier Elimination by Hong

20170627.01.tpd.patch
readme add June Huh quote

+20170702.01.tpd.patch
+books/bookvol10.1 add Weber thesis

--
1.7.5.4