\documentclass[11pt, a4paper]{article}
\usepackage{amsmath,amsfonts,amssymb,amsthm}
\setlength{\textwidth}{6.3in} \setlength{\textheight}{8.7in}
\setlength{\topmargin}{0pt} \setlength{\headsep}{0pt}
\setlength{\headheight}{0pt} \setlength{\oddsidemargin}{0pt}
\setlength{\evensidemargin}{0pt}
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newtheorem{theorem}{Th\'eor\`eme}%[section]
\newtheorem{thm}{Theorem}
\newtheorem{proposition}{Proposition}
\newtheorem{prop}{Proposition}
\newtheorem{corollary}{Corollaire}%[section]
\newtheorem{coro}{Corollary}%[section]
\newtheorem{lemma}{Lemma}%[section]
\newtheorem{fact}{Fact}%[section]
%\newtheorem{definition}{Definition}%[section]
%\newtheorem{example}{Example}
\newcommand\cyc{\mathop{\rm cyc}}
%\newenvironment{proof}{{\em Proof.}}{\mbox{}\hfill $\Box$\medskip}
\newenvironment{example}    {\smallskip\noindent{\bf Example.}} {\smallskip}
\newenvironment{remark}     {\smallskip\noindent{\bf Remark.}} {\smallskip}
\newenvironment{definition} {\smallskip\noindent{\bf Definition.}} {\smallskip}
%binomial coefficients:\bi{#1}{#2}
\newcommand\bi[2]{{{#1}\atopwithdelims(){#2}}}
%q-binomial coefficients:\qbi{#1}{#2}{#3}
\newcommand\qbi[3]{{{#1}\atopwithdelims[]{#2}}_{#3}}
\pagestyle{plain}
\newtheorem*{theo}{Th\'eor\`eme}
\newtheorem{conj}{Conjecture}
\newtheorem{lem}{Lemme}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\catcode`\@=11
\def\Eqalign #1{\null\,\vcenter{\openup\jot\m@th\ialign{
\strut\hfil$\displaystyle{##}$&$\displaystyle{{}##}$\hfil
&&\quad\strut\hfil$\displaystyle{##}$&$\displaystyle{{}##}$
\hfil\crcr #1\crcr}}\,}
\catcode`\@=12
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%



\numberwithin{equation}{section}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\def\refname{References}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\title{The Algebra of Set Functions I~: \\ The Product Theorem and Duality}
\author{Bodo Lass\\
\small Universit\'e de Lyon ; Universit\'e Lyon 1 ; \\[-0.8ex]
\small INSA de Lyon, F-69621 ; Ecole Centrale de Lyon ; \\[-0.8ex]
\small CNRS, UMR5208, Institut Camille Jordan, \\[-0.8ex]
\small 43 blvd du 11 novembre 1918, \\[-0.8ex]
\small F-69622 Villeurbanne-Cedex, France \\[-0.8ex]
\small \texttt{lass} @ \texttt{math.univ-lyon1.fr}\\[-0.8ex]
}
\date{}
%\small 2000 Mathematics Subject Classification : 05}

\maketitle

\begin{abstract}
We give a comprehensive introduction to the algebra of set functions and its generating functions.
This algebraic tool allows us to formulate and prove a product theorem for the enumeration of
functions of many different kinds, in particular injective functions, surjective functions,
matchings and colourings of the vertices of a hypergraph. Moreover, we develop a
general duality theory for counting functions.
\end{abstract}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Introduction}

The algebra of set functions has been briefly introduced in order to count matchings~\cite{L1,L3,L4},
colourings and acyclic orientations~\cite{L2}. In this paper we want to study this algebra
in a more systematic way. Ordinary and exponential generating functions have always
been the favourite tool in enumerative combinatorics. Generating functions for set
functions offer the same advantages and increase the range of applications.
In fact, exponential generating functions operate on set functions,
and it is even possible to work with derivatives of set functions.
Combinatorial techniques like M\"obius inversion are then replaced
by standard algebraic operations. For example, the product rule for
differentiation reflects the most fundamental set theoretic fact~:
\begin{equation}
x \in X_1 \uplus X_2 \qquad \Leftrightarrow \qquad  x \in X_1 \quad \hbox{or} \quad x \in X_2.
\end{equation}

Many applications of the algebra of set functions have their origin in a product theorem
for the enumeration of functions. The basic idea is the following. If we have a function,
then we can look at the preimage of every element of its codomain. This defines a partition
of its domain into blocks. Moreover, the function is injective if and only if the size of every
block of the partition of the domain is~$0$ or~$1$, and it is surjective if and only if no block
is an empty set. Therefore, studying partitions of the domain makes it possible to count many kinds
of functions, and this is what the algebra of set functions can do via the product theorem.
It even works with multiplicative weights.

In Section~2 the basic theory of set functions is fully developed. Our fundamental result, namely the
product theorem, is stated and proved in Section~3. The last part, Section~4, is devoted to a general
``global'' duality theory for the enumeration of functions. This theory does not use the
``local'' set functions and works with weights which just have to form an abelian group.
Many applications to the enumeration of matchings in bipartite graphs will be given
in the subsequent article~\cite{L5}.



\section{The Algebra of Set Functions}

In this article, $A$ will always be a commutative ring with identity element.
Usually, $A$ is just the ring of integers~$\mathbb{Z}$, but it can also be
the field of complex numbers~$\mathbb{C}$ or the ring of polynomials with
complex coefficients~$\mathbb{C}[x]$.

Let us denote by~$\mathbb{N}$ the nonnegative integers. A sequence of numbers
is exactly the same as a function $f: \mathbb{N} \to A$. It is quite usual in
enumerative combinatorics to associate with such a function a power series, namely
\begin{eqnarray}
F_f(x) &=& \sum_{n=0}^\infty f(n) \cdot x^n \qquad \hbox{(ordinary generating function)} \qquad \hbox{or} \\
F_f(x) &=& \sum_{n=0}^\infty f(n) \cdot \frac{x^n}{n!} \qquad \hbox{(exponential generating function).}
\end{eqnarray}
In this way, the functions $f: \mathbb{N} \to A$ become an $A$-module. More precisely, for any $f,g: \mathbb{N} \to A$,
we can define the sum~$f+g: \mathbb{N} \to A$ with the help of the generating functions~: $F_{f+g}(x) = F_f(x)+F_g(x)$.
Of course, it was not necessary to use generating functions for this. We could have defined directly $(f+g)(n) = f(n)+g(n)$
for every $n \in \mathbb{N}$. In the same way, for any element of our commutative ring $a \in A$ and any function $f: \mathbb{N} \to A$,
we can define $a \cdot f: \mathbb{N} \to A$ either with the help of the generating functions~: $F_{a \cdot f}(x) = a \cdot F_f(x)$
or directly~: $(a \cdot f)(n) = a \cdot f(n)$. Last but not least, working with generating functions turns the functions $f: \mathbb{N} \to A$
into an algebra. Indeed, we can define the product $f \cdot g: \mathbb{N} \to A$ of two functions $f,g: \mathbb{N} \to A$ by
$F_{f \cdot g}(x) = F_f(x) \cdot F_g(x)$, but here we must pay attention. This definition means
\begin{eqnarray}
(f \cdot g)(n) &=& \sum_{k=0}^n f(k) \cdot g(n-k) \qquad \hbox{for ordinary generating functions} \qquad \hbox{but}\\
(f \cdot g)(n) &=& \sum_{k=0}^n \binom{n}{k} \cdot f(k) \cdot g(n-k) \qquad \hbox{for exponential generating functions.}
\end{eqnarray}
In other words, the algebra structure of number sequences depends on the generating functions that we work with.
In enumerative combinatorics, however, number sequences are not the only interesting objects.
We have seen already in the introduction that set functions appear naturally. Therefore
we want to study their algebra structure carefully.

Let $X$ be a finite set of cardinality~$n$. We denote by~$2^X$ the family of finite subsets of~$X$,
which has of course cardinality~$2^n$. Let $\mathcal{F}(2^X,A)$ be the $A$-algebra of set functions
$f,g: 2^X \to A$, equipped
with the multiplication
\begin{equation}
(f \cdot g)(X') \; = \; \sum_{X_1 \uplus X_2 = X'} f(X_1) \cdot g(X_2) \qquad \forall \quad \emptyset \subseteq X' \subseteq X
\end{equation}
(where $\uplus$ denotes disjoint union) and the obvious pointwise addition $(f+g)(X') = f(X')+g(X')$
and scalar multiplication $(a \cdot f)(X') = a \cdot f(X')$ for every $\emptyset \subseteq X' \subseteq X$
and for every $a \in A$.

\bigskip

\noindent
Next let $A[X]$ be the $A$-algebra of multiaffine (= square-free) polynomials
\begin{equation}
F(\chi) \; = \; \sum_{X' \subseteq X} a_{X'} \cdot \chi^{X'}, \qquad a_{X'} \in A \quad \forall \quad \emptyset \subseteq X' \subseteq X
\end{equation}
in the $n$ indeterminates $\chi = (\chi_x)_{x \in X}$, where we use the shorthand notation
\begin{equation}
\chi^{X'} \; = \; \prod_{x \in X'} \chi_x, \qquad \chi^{\emptyset} \; := \; 1.
\end{equation}
This algebra is equipped with the usual multiplication of polynomials followed by
extraction of the multiaffine part (i.e.~discarding all monomials that are not
of the form $\chi^{X'}$ for some $X' \subseteq X$), that is
\begin{equation}
\left( \sum_{X' \subseteq X} a_{X'} \cdot \chi^{X'} \right) \cdot \left( \sum_{X' \subseteq X} b_{X'} \cdot \chi^{X'} \right)
\; = \; \sum_{X' \subseteq X} \; \sum_{X_1 \uplus X_2 = X'} a_{X_1} b_{X_2} \cdot \chi^{X'},
\end{equation}
together with the usual addition and scalar multiplication. Note that $A[X]$ is isomorphic
in an obvious way to the quotient algebra $A[\{\chi_x\}]/\langle\{\chi_x^2\}\rangle$.

\begin{remark}
In a more combinatorial way, we could have defined the multiplication of monomials
for all $X_1, X_2 \subseteq X$ by
\begin{equation}
\chi^{X_1} \cdot \chi^{X_2} \; := \; \chi^{X_1+X_2}, \; \hbox{where}
\end{equation}
\begin{equation}
X_1+X_2 \; :=  \;
\left\{ \begin{array}{cl}
X_1 \cup X_2, & \hbox{if \, $X_1 \cap X_2 =   \emptyset$,} \\
\dag,         & \hbox{if \, $X_1 \cap X_2 \ne \emptyset$, \, where}
\end{array} \right.
\end{equation}
\begin{equation}
\dag + X' \; := \; \dag, \quad \dag + \dag \; := \; \dag, \qquad \hbox{and} \qquad \chi^{\dag} \; := \; 0.
\end{equation}
Here $\dag$ corresponds to multisets that are systematically discarded.
\end{remark}

\bigskip

\noindent
Finally, the map $f \mapsto F_f$ that associates the generating polynomial $F_f \in A[X]$
with each set function $f \in \mathcal{F}(2^X,A)$, i.e.
\begin{equation}
F_f(\chi) \; = \; \sum_{X' \subseteq X} f(X') \cdot \chi^{X'},
\end{equation}
is manifestly an algebra isomorphism of $\mathcal{F}(2^X,A)$ onto $A[X]$.
In other words, we have $F_{f \cdot g}(\chi) = F_f(\chi) \cdot F_g(\chi)$,
$F_{f+g}(\chi) = F_f(\chi) + F_g(\chi)$ and $F_{a \cdot f}(\chi) = a \cdot F_f(\chi)$
for every $f,g \in \mathcal{F}(2^X,A)$ and for all $a \in A$. Many applications of our
algebra of set functions $A[X]$ in different parts of enumerative graph theory can be found
in~\cite{L1}, \cite{L2}, \cite{L3} and \cite{L4}.

\bigskip

For $|X| = \infty$ let $(2^X)_{\text{fin}}$ be the partially ordered set of all finite subsets of $X$.
We have the canonical projections $p_{X_1,X_2}: A[X_1] \to A[X_2]$ ($X_1,X_2 \in (2^X)_{\text{fin}},
X_1 \supseteq X_2$) and define
\begin{equation}
A[X] \; := \; \lim_{\longleftarrow} A[X'], \quad X' \in (2^X)_{\text{fin}}
\end{equation}
with the help of the projective limit. This means nothing else
than working with generating functions of the form
\begin{equation}
F(\chi) \; = \; \sum_{X' \in (2^X)_{\text{fin}}} a_{X'} \cdot \chi^{X'}, \qquad a_{X'} \in A \quad \forall \quad \emptyset \subseteq X' \subseteq X.
\end{equation}

\bigskip

\noindent
Now for any $X$ (finite or infinite), we consider the particular element
\begin{equation}
\mathcal{X} \; := \; \sum_{x \in X} \chi^{\{x\}} \; = \; \sum_{x \in X} \chi_x
\end{equation}
in~$A[X]$~: it is the generating function for the indicator function of the subsets of~$X$
of cardinality~$1$. Then, in the product ${\mathcal X}^k$ in the algebra~$A[X]$,
each set of cardinality~$k$ occurs $k!$~times, so $\mathcal{X}^k/k!$
is the generating function for the indicator function of the subsets of~$X$
of cardinality~$k$.
If now $g: \mathbb{N} \to A$ is an $A$-valued function on the natural numbers, the identity
\begin{equation}
\sum_{k=0}^{\infty} g(k) \cdot \frac{\mathcal{X}^k}{k!} \; = \; \sum_{X' \in (2^X)_{\text{fin}}} g(|X'|) \cdot \chi^{X'}
\end{equation}
provides an embedding of the algebra $A![[\mathcal{X}]]$ of generating functions of exponential type
(usually the variable is called $x$ instead of $\mathcal{X}$) into our algebra $A[X]$
\emph{if and only if} $|X| = \infty$. Simply remark that if $k > |X|$, then $\mathcal{X}^k/k! = 0$.
If $|X| = \infty$, the image of this embedding is the subalgebra of~$A[X]$ consisting
of all generating functions~$F_f$ of set functions~$f$, where the value depends only on the
cardinality of the set, i.e.~$f(X') = g(|X'|)$ for every $X' \in (2^X)_{\text{fin}}$.
This embedding is at the origin of (almost?) all the applications of $A![[\mathcal{X}]]$
in combinatorics, but it requires the existence of an infinite combinatorial model depending
just on cardinalities. Consequently, $A[X]$ provides more flexibility and closeness to combinatorics;
it is also ideally suited for computer calculations.

\begin{remark} The ring ${\mathbb Z}![[\mathcal{X}]]$ is not noetherian,
but it contains the important functions $\exp(\mathcal{X})$ and $\log(1+\mathcal{X})$.
\end{remark}

\begin{example} If $\operatorname{char} A = 2$, then we have
\begin{eqnarray}
(1+\mathcal{X})^{-1} &=& \sum_{k=0}^{\infty} (-1)^k k! \cdot \frac{\mathcal{X}^k}{k!} \\
      &\equiv& 1+\mathcal{X} \quad \hbox{and} \\
\log(1+\mathcal{X})  &=& \sum_{k=1}^{\infty} (-1)^{k-1} (k-1)! \cdot \frac{\mathcal{X}^k}{k!} \\
      &\equiv& \mathcal{X}+\frac{\mathcal{X}^2}{2}
\end{eqnarray}
in the ring $A![[\mathcal{X}]]$. These identities are at the origin of lots of results on parity in combinatorics;
see \cite{L4}.
\end{example}

\medskip

\noindent
For all $t \in A$ let $(t \cdot \chi)^{X'} \; := \; t^{|X'|} \cdot \chi^{X'}$, $\; X' \in (2^X)_{\text{fin}}$, and therefore
\begin{equation}
F_f(t \cdot \chi) \; = \; \sum_{X' \in (2^X)_{\text{fin}}} f(X') \; t^{|X'|} \cdot \chi^{X'},
\end{equation}
where $f: (2^X)_{\text{fin}} \to A$ is an arbitrary set function.
It is evident that this definition is compatible with the addition and the multiplication,
in particular $(t \cdot \chi)^{X_1} \cdot (t \cdot \chi)^{X_2} = (t \cdot \chi)^{X_1+X_2}$.
Most important are the special cases $t = -1$ and $t = 0$: $F_f(0) = F_f(0 \cdot \chi) = f(\emptyset)$.

\bigskip

\noindent
We define the degree of a set function $f: (2^X)_{\text{fin}} \to A$ by
\begin{equation}
\deg F_f(\chi) \; := \; \min \{n \in \mathbb{N} \, | \, \exists \; X' \in (2^X)_{\text{fin}} \; \hbox{such that} \; |X'| = n \; \hbox{and} \; f(X') \ne 0 \},
\end{equation}
where the minimum over an empty set is $\infty$, that is, the set function which is zero for every subset of~$X$ has the degree~$\infty$.
Our definition implies $\deg\left(\chi^{X_1} \cdot \chi^{X_2}\right) \ge \deg\left(\chi^{X_1}\right) + \deg\left(\chi^{X_2}\right)$
and, more generally,
\begin{equation}
\deg\left(F_f(\chi) \cdot F_g(\chi)\right) \; \ge \; \deg\left(F_f(\chi)\right) + \deg\left(F_g(\chi)\right)
\end{equation}
for arbitrary $f,g: (2^X)_{\text{fin}} \to A$. It is interesting to remark that these inequalities are not satisfied
for other natural definitions of $X_1 + X_2$ such as $X_1 + X_2 = X_1 \cup X_2$.

If $F_f(0) = f(\emptyset) = 0$, i.e.~$\deg\left(F_f(\chi)\right) \ge 1$, then $\deg\left(F_f(\chi)^k\right) \ge k$ for every $k \in \mathbb{N}$.
Moreover, $F_f(\chi)^k/k!$ is defined for any ring $A$, because a partition into $k$ \emph{nonempty} subsets can be ordered in $k!$ different ways.
Thus, we have an operation of $A![[\mathcal{X}]]$ on $A[X]$ via the substitution $G(F_f (\chi))$ defined for any $G \in A![[\mathcal{X}]]$
(all calculations in $A$ are finite). The following proposition is now easy to prove.

\begin{proposition}
The set function $F_f(\chi)$ is invertible if and only if $F_f(0) = f(\emptyset)$ is invertible.
In that case $F_f(\chi)^{-1} = F_{f^{-1}}(\chi)$, where $f^{-1}: (2^X)_{\text{fin}} \to A$ can
be calculated recursively by using $f^{-1}(\emptyset) = f(\emptyset)^{-1}$ and
\begin{equation}
f^{-1}(X') \; = \; f(\emptyset)^{-1} \cdot \left( - \sum_{\emptyset \subset X'' \subseteq X'} f(X'') \cdot f^{-1} (X' \setminus X'') \right)
\end{equation}
for all $\emptyset \subset X' \subseteq X$. Moreover, if $F_g(0) = g(\emptyset) = 0$, then the inverse of $1+F_g(\chi)$ can also
be calculated by substituting $F_g(\chi)$ into $(1+\mathcal{X})^{-1}$, that is
\begin{equation}
\left(1+F_g(\chi)\right)^{-1} \; = \; \sum_{k=0}^{\infty} (-1)^k k! \cdot \frac{F_g(\chi)^k}{k!}.
\end{equation}
If $\operatorname{char} A = 2$, this reduces to $\left(1+F_g(\chi)\right)^{-1} \; \equiv \; 1+F_g(\chi)$. \qed
\end{proposition}

\begin{example}
For two set functions $f,g: (2^X)_{\text{fin}} \to A$ we have the following equivalence~:
\begin{equation}
F_g(\chi) \; = \; \exp(\mathcal{X}) \cdot F_f(\chi) \qquad \Leftrightarrow \qquad
F_f(\chi) \; = \; \exp(-\mathcal{X}) \cdot F_g(\chi).
\end{equation}
In other words,
\begin{eqnarray}
g(X') &=& \sum_{X'' \subseteq X'} f(X'')
\qquad \forall \, X' \in (2^X)_{\text{fin}}  \qquad \Leftrightarrow \qquad \nonumber \\
f(X') &=& \sum_{X'' \subseteq X'} (-1)^{|X' \setminus X''|} g(X'')
\qquad \forall \, X' \in (2^X)_{\text{fin}}.
\end{eqnarray}
This is nothing else than the famous inclusion-exclusion principle, also known as the sieve principle.
\end{example}

\medskip

\noindent
Finally, for every $x \in X$, we use the derivatives $\partial^x$ defined by
\begin{equation}
\partial^x \, \chi^{X'} \; := \;
\left\{ \begin{array}{cl}
\chi^{X'}, & \hbox{if \, $x \in X'$,} \\
        0, & \hbox{otherwise.}
\end{array} \right.
\end{equation}
The product rule
\begin{equation}
\partial^x \left(F_f (\chi) \cdot F_g (\chi)\right) \; = \;
\left(\partial^x F_f (\chi)\right) \cdot F_g (\chi) + F_f (\chi) \cdot \left(\partial^x F_g (\chi)\right)
\end{equation}
is the algebraic analogue of the most fundamental set theoretic fact:
\begin{equation}
x \in X_1 \uplus X_2 \qquad \Leftrightarrow \qquad  x \in X_1 \quad \hbox{or} \quad x \in X_2.
\end{equation}
In this way, combinatorial arguments where two cases have to be distinguished
can be replaced by differential calculus. The product rule immediately implies that
$\partial^x \left(F_f(\chi)\right)^n = n \cdot \left(F_f(\chi)\right)^{n-1} \cdot \partial^x F_f (\chi)$,
i.e., it implies the chain rule~:
\begin{equation}
\partial^x \left(G(F_f (\chi))\right) \; = \; G'(F_f (\chi)) \cdot \partial^x F_f (\chi), \quad G \in A![[\mathcal{X}]].
\end{equation}

\bigskip

\begin{remark} Under the algebra isomorphism $A[X] \simeq A[\{\chi_x\}]/\langle\{\chi_x^2\}\rangle$,
$\partial^x$ does not correspond to $\partial / \partial \chi_x$, but to $\chi_x \cdot \partial / \partial \chi_x$.
The partial derivative $\partial / \partial \chi_x$ cannot be defined in $A[X]$.
\end{remark}

\bigskip

\noindent
For any weight function $w: X \to A$, we can also use the differential operator
\begin{equation}
\partial_w \; := \; \sum_{x \in X} w(x) \cdot \partial^x,
\end{equation}
which is the general form of a differential operator for which the product rule is satisfied.
In particular, if $w(x) = 1$ for every $x \in X$, we let $\partial_w = \partial$. This means that 
\begin{equation}
\partial F_f (\chi)\; = \; \frac{d}{dt} F_f (t \cdot \chi) |_{t=1} \; = \; \sum_{\emptyset \subseteq X' \subseteq X} f(X') \cdot |X'| \cdot \chi^{X'}.
\end{equation}
If now $g: \mathbb{N} \to A$ is an $A$-valued function on the natural numbers, then
\begin{equation}
\partial \sum_{k=0}^{\infty} g(k) \cdot \frac{\mathcal{X}^k}{k!} \; = \;
\sum_{k=0}^{\infty} g(k) \cdot k \cdot \frac{\mathcal{X}^k}{k!} \; = \;
\mathcal{X}\frac{d}{d\mathcal{X}}\sum_{k=0}^{\infty} g(k) \cdot \frac{\mathcal{X}^k}{k!}.
\end{equation}
It is remarkable that P\'olya and Szeg\"o~\cite{PZ} introduce $\mathcal{X}\frac{d}{d\mathcal{X}}$ as
``the'' differential operator for power series.

\begin{example}
For two set functions $f,g: (2^X)_{\text{fin}} \to A$ with $f(\emptyset) = g(\emptyset) = 0$ we have the following equivalence~:
\begin{equation}
1+F_g(\chi) \; = \; \exp\left(F_f(\chi)\right) \qquad \Leftrightarrow \qquad
  F_f(\chi) \; = \; \log\left(1+F_g(\chi)\right). \label{eq:log}
\end{equation}
In other words,
\begin{eqnarray}
g(X') &=& \sum_{k=1}^{\infty} \, \sum_{B_1 \uplus \dots \uplus B_k = X'} \, \prod_{i=1}^k f(B_i)
\qquad \forall X' \in (2^X)_{\text{fin}} \setminus \{\emptyset\}  \qquad \Leftrightarrow \qquad \nonumber \\
f(X') &=& \sum_{k=1}^{\infty} (-1)^{k-1}(k-1)! \sum_{B_1 \uplus \dots \uplus B_k = X'} \, \prod_{i=1}^k g(B_i)
\qquad \forall X' \in (2^X)_{\text{fin}} \setminus \{\emptyset\}.
\end{eqnarray}
This is nothing else than M\"obius inversion for the lattice of partitions, for generalized multiplicative functions
(i.e. the value on a partition is the product of the values on its blocks; but the latter may depend on
the block and not just on its cardinality; see \cite{A}, chapter V.1.C, and \cite{DRS}, chapter 5.2).
Our equivalence~\eqref{eq:log} can be continued~:
\begin{eqnarray}
F_f(\chi) &=& \log\left(1+F_g(\chi)\right)    \\
\Leftrightarrow \qquad  \partial^x F_f(\chi) &=& \partial^x \log\left(1+F_g(\chi)\right) \qquad \forall \, x \in X   \\
\Leftrightarrow \qquad  \left(1+F_g(\chi)\right) \cdot \partial^x F_f(\chi) &=& \partial^x F_g(\chi) \qquad \forall \, x \in X  \\
\Leftrightarrow \qquad  f(X') + \sum_{x \in X'' \subset X'} f(X'') g(X' \setminus X'') &=& g(X') \qquad \forall \, x \in X' \subseteq X.
\end{eqnarray}
If $\operatorname{char} A = 0$, then we also get the equivalences
\begin{eqnarray}
F_f(\chi) &=& \log\left(1+F_g(\chi)\right)    \\
\Leftrightarrow \qquad  \partial F_f(\chi) &=& \partial \log\left(1+F_g(\chi)\right) \\
\Leftrightarrow \qquad  \left(1+F_g(\chi)\right) \cdot \partial F_f(\chi) &=& \partial F_g(\chi) \\
\Leftrightarrow \qquad  |X'|f(X') + \sum_{\emptyset \subset X'' \subset X'} |X''|f(X'') g(X' \setminus X'') &=& |X'|g(X') \quad \forall \, \emptyset \subset X' \subseteq X.
\end{eqnarray}
All those equivalences have the great advantage that they use just one single product of set functions.
This is particularly useful for calculating the logarithm of a set function by computer.
\end{example}



\section{The Product Theorem for Counting Functions}

For finite sets $X$ and $Y$ let $T$ be a family of functions $t : X' \to Y'$, $X' \subseteq X$, $Y' \subseteq Y$.
In order to evaluate the number of those functions, we consider a partition $Y' = Y_1 \uplus Y_2$, which induces a partition of $X'$ via $t$,
namely $X' = t^{-1}(Y_1) \uplus t^{-1}(Y_2)$.

\begin{definition}
The family of functions $T$ is called \emph{partitionable} if the following conditions are satisfied~:

a) For every $t \in T$, $t : X' \to Y'$, and $Y' = Y_1 \uplus Y_2$, the two functions
$t|_{t^{-1}(Y_1)} : t^{-1}(Y_1) \to Y_1$ and $t|_{t^{-1}(Y_2)} : t^{-1}(Y_2) \to Y_2$
also belong to the family $T$.

b) For any $t_1,t_2 \in T$, $t_1 : X_1 \to Y_1$, $t_2 : X_2 \to Y_2$, with $X_1 \cap X_2 = Y_1 \cap Y_2 = \emptyset$,
there exists exactly one $t \in T$, $t : X_1 \cup X_2 \to Y_1 \cup Y_2$ such that $t|_{X_1} = t_1$ and $t|_{X_2} = t_2$.

In particular, there is exactly one $t_0 \in T$: $t_0 : \emptyset \to \emptyset$,
and there are no further functions of the form $t: X' \to \emptyset$. Moreover,
there is exactly one function of the form $t: \emptyset \to Y'$ if every $y \in Y'$
can have an empty preimage; if one $y \in Y'$ has to have a nonempty preimage, then
there is no function $t: \emptyset \to Y'$.
\end{definition}

From now on, we will only make use of partitionable families of functions.
This is not very restrictive as can be seen in the following proposition.

\begin{proposition}
The family of surjective functions is partitionable, the family of injective functions is partitionable,
and the family of bijective functions is partitionable. If $G = (X,Y;E)$ is a bipartite graph, then
the family of functions respecting its edges (that is $(x,t(x))$ must always be an edge of~$G$)
is also partitionable. If a hypergraph is defined on $X$ (its so-called hyperedges form
just a family of subsets of $X$) and if $Y$ is considered as a set of colours,
then colourings of $X'$ using colours of $Y'$ without monochromatic hyperedges
are partitionable. Moreover, the intersection of two partitionable families
of functions is also partitionable.
\end{proposition}

\begin{proof}
It is sufficient to verify that for any $t_1 : X_1 \to Y_1$, $t_2 : X_2 \to Y_2$ with $X_1 \cap X_2 = Y_1 \cap Y_2 = \emptyset$,
and $t : X_1 \cup X_2 \to Y_1 \cup Y_2$ with $t|_{X_1} = t_1$ and $t|_{X_2} = t_2$, we have the following equivalences~:

The function $t$ is surjective if and only if $t_1$ and $t_2$ are surjective.
The function $t$ is injective if and only if $t_1$ and $t_2$ are injective.
The function $t$ is bijective if and only if $t_1$ and $t_2$ are bijective.
The function $t$ respects the edges of a bipartite graph $G = (X,Y;E)$ if and only if $t_1$ and $t_2$ respect its edges.
The function $t$ is a colouring without monochromatic hyperedges if and only if $t_1$ and $t_2$ are colourings of this type.
\end{proof}

Let $T$ be a partitionable family of functions. If we want to count all $t : X' \to Y'$ with $X' \subseteq X$, $Y' \subseteq Y$ and $t \in T$,
then we can fix a partition $Y' = Y_1 \uplus Y_2$. Next, for all partitions $X' = X_1 \uplus X_2$,
we just have to count the number of functions $t_1 : X_1 \to Y_1$ with $t_1 \in T$
and the number of functions $t_2 : X_2 \to Y_2$ with $t_2 \in T$.
If we multiply the two results and sum over all partitions $X' = X_1 \uplus X_2$ we get exactly the desired result.
Therefore, for every $Y' \subseteq Y$, we define the set function
\begin{equation}
T^{Y'}(\chi) \; := \; \sum_{\emptyset \subseteq X' \subseteq X} \left| \{ t \in T \mid t : X' \to Y' \} \right| \cdot \chi^{X'}.
\end{equation}
The argument that we have just presented proves now the following lemma.

\begin{lemma}
For finite sets $X$ and $Y$ let $T$ be a partitionable family of functions $t : X' \to  Y'$, $X' \subseteq X$, $Y' \subseteq Y$.
Then, for any partition $Y' = Y_1 \uplus Y_2$ we have the following identity~:
\begin{equation}
T^{Y'}(\chi) \; = \; T^{Y_1}(\chi) \cdot T^{Y_2}(\chi).
\end{equation}
\qed
\end{lemma}

\medskip

\noindent
If we apply our lemma several times, we get the main theorem of this section.

\begin{thm} (Product Theorem)
For finite sets $X$ and $Y$, let $T$ be a partitionable family of functions $t : X' \to  Y'$, $X' \subseteq X$, $Y' \subseteq Y$.
Then for any $Y' \subseteq Y$ the following identity holds~:
\begin{equation}
T^{Y'}(\chi) \; = \; \prod_{y \in Y'} T^{\{y\}}(\chi).
\end{equation}
In particular,
\begin{equation}
T^{\emptyset}(\chi) \; = \; 1 \qquad \hbox{and} \qquad T^{Y'}(0) \; = \; \prod_{y \in Y'} T^{\{y\}}(0).
\end{equation}
For any $y \in Y$ and $X' \subseteq X$ the coefficient of $\chi^{X'}$ in $T^{\{y\}}(\chi)$ is $1$
if $t : X' \to \{y\}$ belongs to $T$ and $0$ otherwise. Those coefficients determine a family
of partitionable functions uniquely.\qed
\end{thm}

\begin{example}
If $T$ is the family of injective functions, then $T^{\{y\}}(\chi) = 1 + \mathcal{X}$
because a function is injective if and only if the preimage of every $y \in Y$ is either
the empty set or a one-element subset of $X$. Therefore, the product theorem implies
\begin{equation}
T^{Y}(\chi) \; = \; \prod_{y \in Y} T^{\{y\}}(\chi) \; = \; (1 + \mathcal{X})^{|Y|} \; = \; \sum_{k=0}^{|X|} \binom{|Y|}{k} \mathcal{X}^k
            \; = \; \sum_{k=0}^{|X|} |Y|^{\underline{k}} \cdot \frac{\mathcal{X}^k}{k!},
\end{equation}
where $|Y|^{\underline{k}} = |Y|(|Y|-1)(|Y|-2) \cdots (|Y|-k+1)$ by definition.
The coefficient of $\chi^X$ in $T^{Y}(\chi)$ counts the number of injective functions from $X$ to $Y$.
It is equal to $|Y|^{\underline{|X|}} = |Y|(|Y|-1)(|Y|-2) \cdots (|Y|-|X|+1)$, as expected.
\end{example}

\begin{definition}
A weight function $w : T \to A$ is called \emph{multiplicative} if for every $t : X' \to Y'$, ($X' \subseteq X$, $Y' \subseteq Y$, $t \in T$)
and $Y' = Y_1 \uplus Y_2$ we have
\begin{equation}
w(t) \; = \; w(t|_{t^{-1}(Y_1)}) \cdot w(t|_{t^{-1}(Y_2)}),
\end{equation}
where $t|_{t^{-1}(Y_1)} : t^{-1}(Y_1) \to Y_1$,  $t|_{t^{-1}(Y_2)} : t^{-1}(Y_2) \to Y_2$, and in particular $w(t_0) = 1$ for $t_0 : \emptyset \to \emptyset$.
\end{definition}

\begin{example}
If a hypergraph is defined on $X$ (its so-called hyperedges form just a family of subsets of $X$)
and if $Y$ is considered as a set of colours, we can study colourings $c : X' \to Y'$ with
$X' \subseteq X$ and $Y' \subseteq Y$. Let $m(c)$ be the number of monochromatic
hyperedges in $X'$. Then, for every $a \in A$ (or for a variable $a$),
$w(c) = a^{m(c)}$ is a multiplicative weight function.
\end{example}

\medskip

\noindent
In the same spirit as previously, for every $Y' \subseteq Y$, let us define the set function
\begin{equation}
T^{Y'}_w(\chi) \; := \; \sum_{\emptyset \subseteq X' \subseteq X} \left( \sum_{t: X' \to Y', \; t \in T} w(t) \right) \cdot \chi^{X'}
\end{equation}
(in the unweighted case, the weight of every function from $T$ was $1$). Then, we get again a product theorem.

\begin{thm} (Weighted Product Theorem)
For finite sets $X$ and $Y$ let $T$ be a partitionable family of functions $t : X' \to  Y'$, $X' \subseteq X$, $Y' \subseteq Y$
and let $w : T \to A$ be a multiplicative weight function. Then, for any $Y' \subseteq Y$ the following identity holds~:
\begin{equation}
T^{Y'}_w(\chi) \; = \; \prod_{y \in Y'} T^{\{y\}}_w(\chi).
\end{equation}
In particular,
\begin{equation}
T^{\emptyset}_w(\chi) \; = \; 1 \qquad \hbox{and} \qquad T^{Y'}_w(0) \; = \; \prod_{y \in Y'} T^{\{y\}}_w(0).
\end{equation}
\qed
\end{thm}

Without loss of generality, it is possible in the weighted case to take for~$T$ the partitionable family of all functions $t : X' \to Y'$, as functions which do not belong to the original~$T$ can just get the weight~$0$.

On the other hand, if we choose for every $y \in Y$ an arbitrary set function $T^{\{y\}}_w(\chi) \in A[X]$ and if we consider
the previous theorem as a definition, then we automatically get a partitionable family of functions with a multiplicative
weight function. This situation is characterized by the fact that, for every $y \in Y$, we can choose independently
which subsets $X' \subseteq X$ may be mapped to $y$ and what kind of weight we want to use for every
$X' \subseteq X$.

In that case, $T^{Y'}_w(\chi)$ counts functions to~$Y'$ in such a way that every $y \in Y'$ contributes a multiplication
with the coefficient of $\chi^{t^{-1}\{y\}}$ in $T^{\{y\}}_w(\chi)$. In particular, every $y \in Y'$ that is not in the
image of~$t$ contributes a multiplication with $T^{\{y\}}_w(0)$. Therefore $T^{Y'}_w(\chi)$ counts weighted
surjective functions to $Y' \subseteq Y$ if $T^{\{y\}}_w(0) = 0$ for every $y \in Y$.
If $T^{\{y\}}_w(0) = 1$ for every $y \in Y$, then $T^{Y'}_w(\chi)$ counts arbitrary
weighted functions to $Y'$.



\section{Duality for the Enumeration of Functions}

For finite sets~$X$ and~$Y$ let $T$ be a set of functions $t : X \to Y$, and let $w : T \to A$ be a weight function.
In this section, $A$ need not be a ring~: it is sufficient to have an abelian group. Without loss of generality,
we can (and will) assume that $T$ is the set of all the $|Y|^{|X|}$ possible functions $t : X \to Y$,
because functions which did not belong to the original~$T$ can just get the weight~$0$.

If $|X| = 1$, then we have $|Y|$ different functions and therefore $|Y|$~weights.
Those weights can be considered as a vector with $|Y|$~elements from our abelian group~$A$.
Let $s$ be the sum of those $|Y|$ weights. It is possible, in a unique way, to find one
additional element of~$A$, namely~$-s$, and to form a vector with $|Y|+1$~elements, namely our
original $|Y|$ weights and the additional weight, in such a way that the sum of all the weights
of our new vector of length $|Y|+1$ equals~$0$.

This completely trivial observation becomes more interesting if $|X| = 2$. In that case,
we have $|Y|^2$ different functions and therefore $|Y|^2$ weights forming a matrix of size
$|Y| \times |Y|$. Now it is possible, in a unique way, to add exactly one row and one column
to our matrix in order to get a matrix of size $(|Y|+1) \times (|Y|+1)$ with the property that
every row sum and every column sum of it equals~$0$. This is still almost trivial, but already
a little bit tricky for the element which is both in the new row and in the new column. This
element, however, has to be equal to the sum of all elements of our original matrix of size $|Y| \times |Y|$.

If $|X| = 3$, our weights form a cube of size $|Y| \times |Y| \times |Y|$, which can be embedded
uniquely in a cube of size $(|Y|+1) \times (|Y|+1) \times (|Y|+1)$ in such a way that the sum of
the elements of every row, every column and every pillar becomes~$0$, etc. We want to formalize
those ideas rigorously in the language of functions.

\begin{definition}
The pair $(T,w)$ is called \emph{normal} if for every $x \in X$ and for every test function $t^* : X \setminus \{x\} \to Y$ we have
\begin{equation}
\sum_{\textstyle t : X \to Y, \; t|_{X \setminus \{x\}} = t^*} w(t) \; = \; 0.
\end{equation}
\end{definition}

\medskip

\noindent
Usually, $(T,w)$ is not normal, but we have the following proposition.

\begin{proposition}
The pair $(T,w)$ can be normalized in a unique way by adding one new element~$y$ to the set~$Y$.
Thereby the function $t': X \to Y \cup \{y\}$ gets the weight
\begin{equation}
w(t') \; = \; (-1)^{|t'^{-1}(y)|} \sum_{\textstyle t : X \to Y, \; t|_{t'^{-1}(Y)} = t'|_{t'^{-1}(Y)}} w(t).
\end{equation}
\end{proposition}

\begin{proof}
If we choose the weights according to the preceding formula, then the situation becomes normal~:
for every test function $t^* : X \setminus \{x\} \to Y \cup \{y\}$ we get
\begin{eqnarray}
  && \sum_{\textstyle t' : X \to Y \cup \{y\}, \; t'|_{X \setminus \{x\}} = t^*} w(t') \nonumber \\
= && \sum_{\textstyle t' : X \to Y \cup \{y\}, \; t'|_{X \setminus \{x\}} = t^*, \; t'(x) \in Y} w(t') + \nonumber \\
  && \sum_{\textstyle t' : X \to Y \cup \{y\}, \; t'|_{X \setminus \{x\}} = t^*, \; t'(x) = y} w(t') \nonumber \\
= &&            (-1)^{|{t^*}^{-1}(y)|} \sum_{\textstyle t : X \to Y, \; t|_{{t^*}^{-1}(Y)} = t^*|_{{t^*}^{-1}(Y)}} w(t) + \nonumber \\
  && (-1)^{|{t^*}^{-1}(y) \cup \{x\}|} \sum_{\textstyle t : X \to Y, \; t|_{{t^*}^{-1}(Y)} = t^*|_{{t^*}^{-1}(Y)}} w(t) \nonumber \\
= && 0,
\end{eqnarray}
as desired. On the other hand, if we want to have normality, we must choose the weights as indicated in the proposition.
This can be proved easily by induction on~$|t'^{-1}(y)|$. The base case $|t'^{-1}(y)| = 0$ is evident.
But if $x \in t'^{-1}(y)$, the normality and the inductive hypothesis imply
\begin{eqnarray}
w(t') &=& - \sum_{\textstyle t'': X \to Y \cup \{y\}, \; t''|_{X \setminus \{x\}} = t'|_{X \setminus \{x\}}, \; t''(x) \in Y} w(t'') \nonumber \\
      &=& (-1)^{|t'^{-1}(y)|} \sum_{\textstyle t : X \to Y, \; t|_{t'^{-1}(Y)} = t'|_{t'^{-1}(Y)}} w(t).
\end{eqnarray}
\end{proof}

\bigskip

Now choose for every $x \in X$ some $Y_x \subseteq Y$ and join $x$ to every $y \in Y_x$ by an edge.
The bipartite graph obtained in this way will be denoted by $G = (X,Y;E)$. If for some $x \in X$ we choose the set
$Y \setminus Y_x$ instead of $Y_x$, then, instead of~$G$, we obtain its partial complement denoted by~$C_x(G)$.
In general, for any $X' \subseteq X$, we can define the complement $C_{X'}(G)$ with respect to~$X'$ by replacing,
for every $x \in X'$, the set~$Y_x$ by $Y \setminus Y_x$. It is evident that for all $X',X'' \subseteq X$ we have
$C_{X'}(C_{X''}(G)) = C_{X' \triangle X''}(G)$, where $\triangle$ denotes the symmetric difference of two sets.
Naturally, $C_{\emptyset}(G) = G$ and $C_X(G) = \overline{G}$, where $\overline{G}$ is called the bipartite complement of~$G$.
Finally, we define $W(T,w,G)$ as the weighted number of functions from $T$ which respect the edges of~$G$, that is
\begin{equation}
W(T,w,G) \; = \; \sum_{\textstyle t : X \to Y, \; t(x) \in Y_x \; \forall x \in X} w(t).
\end{equation}
Let us suppose that $(T,w)$ is \emph{normal}, and let us fix some $x \in X$. If we sum, for every test function $t^* : X \setminus \{x\} \to Y$
respecting the edges of~$G$, the condition of normality (see the previous definition), then we get the relation
$W(T,w,G) + W(T,w,C_x(G)) = 0$. This proves the following theorem.

\begin{thm}[Duality Theorem]
Let $(T,w)$ be normal. Then, for every $x \in X$,
\begin{equation}
W(T,w,C_x(G)) \; = \; -W(T,w,G).
\end{equation}
More generally, for any $X' \subseteq X$,
\begin{equation}
W(T,w,C_{X'}(G)) \; = \; (-1)^{|X'|}W(T,w,G),
\end{equation}
and in particular,
\begin{equation}
W(T,w,\overline{G}) \; = \; (-1)^{|X|}W(T,w,G).
\end{equation}
\qed
\end{thm}

\bigskip

\begin{remark}
On the other hand, if $t^* : X \setminus \{x\} \to Y$ is a test function and if for~$G$ we choose
the graph which has the $|X|-1$ edges $(x',t^*(x'))$ with $x' \in X \setminus \{x\}$,
then the equation $W(T,w,C_x(G)) = -W(T,w,G)$ implies the normality condition (see the
preceding definition).
\end{remark}


\bigskip

\noindent
\emph{Acknowledgements:} \quad
I am grateful to D.~Foata and G.~Han for all their help and encouragement.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{thebibliography}{99}

\bibitem{A}
M.~Aigner,
``Combinatorial Theory'',
Springer-Verlag, Heidelberg, 1979.

\bibitem{DRS}
P.~Doubilet, G.-C.~Rota and R.~Stanley,
``On the foundations of combinatorial theory (VI)'',
in~: Proceedings of the Sixth Berkeley Symposium on
Mathematical Statistics and Probability, v.~II,
University of California Press, Berkeley, 1972, 267--318.

\bibitem{L1}
B.~Lass,
``Matching polynomials and duality'',
\emph{Combinatorica} \textbf{24} (2004), 427--440.

\bibitem{L2}
B.~Lass,
``Orientations acycliques et le polyn\^ome chromatique'',
\emph{European J.~Combin.} \textbf{22} (2001), 1101--1123.

\bibitem{L3}
B.~Lass,
``The $N$-dimensional matching polynomial'',
\emph{Geom.~Funct.~Anal.} \textbf{15} (2005), 453--475.

\bibitem{L4}
B.~Lass,
``Variations sur le th\`eme $E+\overline E=XY$'',
\emph{Adv.~in Appl.~Math.} \textbf{29} (2002), 215--242.

\bibitem{L5}
B.~Lass,
``The algebra of set functions II~: An enumerative analogue
of Hall's theorem for bipartite graphs'', preprint.

\bibitem{PZ}
G.~P\'olya and G.~Szeg\"o,
``Problems and Theorems in Analysis I,
Series, integral calculus, theory of functions'',
Springer-Verlag, Berlin, 1978.

\end{thebibliography}
\end{document}
