
Use cases

Concrete AI applications across different domains

Artificial intelligence is transforming many industries. Below are the main use cases, each with concrete implementation examples.

Chatbots and assistants

Conversational assistant

from anthropic import Anthropic

class ConversationalAssistant:
    def __init__(self, system_prompt: str):
        self.client = Anthropic()
        self.system_prompt = system_prompt
        self.conversation_history = []

    def chat(self, user_message: str) -> str:
        self.conversation_history.append({
            "role": "user",
            "content": user_message
        })

        response = self.client.messages.create(
            model="claude-sonnet-4-20250514",
            max_tokens=1024,
            system=self.system_prompt,
            messages=self.conversation_history
        )

        assistant_message = response.content[0].text
        self.conversation_history.append({
            "role": "assistant",
            "content": assistant_message
        })

        return assistant_message

    def reset(self):
        self.conversation_history = []

# Usage
assistant = ConversationalAssistant(
    system_prompt="""You are a customer support assistant for an e-commerce company.
    You help customers with their orders, returns, and product questions.
    Be polite, concise, and solution-oriented."""
)

response = assistant.chat("My order hasn't arrived")

RAG chatbot

from anthropic import Anthropic
import chromadb

class RAGChatbot:
    def __init__(self, collection_name: str):
        self.llm = Anthropic()
        self.db = chromadb.PersistentClient(path="./db")
        self.collection = self.db.get_collection(collection_name)

    def get_context(self, query: str, n_results: int = 5) -> str:
        results = self.collection.query(
            query_texts=[query],
            n_results=n_results
        )
        return "\n\n".join(results['documents'][0])

    def answer(self, question: str) -> dict:
        context = self.get_context(question)

        prompt = f"""Utilise le contexte suivant pour répondre à la question.
Si la réponse n'est pas dans le contexte, dis-le clairement.

CONTEXTE:
{context}

QUESTION: {question}

RÉPONSE:"""

        response = self.llm.messages.create(
            model="claude-sonnet-4-20250514",
            max_tokens=1024,
            messages=[{"role": "user", "content": prompt}]
        )

        return {
            "answer": response.content[0].text,
            "sources": context
        }
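
The class above assumes the Chroma collection already exists and is populated. A minimal indexing sketch, assuming plain-text chunks and a hypothetical "docs" collection name (not part of the original example):

import chromadb

def build_collection(documents: list[str], collection_name: str = "docs"):
    # Create (or reuse) the persistent collection the chatbot will query
    db = chromadb.PersistentClient(path="./db")
    collection = db.get_or_create_collection(collection_name)

    # Chroma embeds the documents with its default embedding function
    collection.add(
        documents=documents,
        ids=[f"doc-{i}" for i in range(len(documents))]
    )
    return collection

# Usage (hypothetical content)
# build_collection(["Returns are accepted within 30 days...", "Shipping takes 2-4 business days..."])
# bot = RAGChatbot("docs")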

Document analysis

Structured information extraction

from anthropic import Anthropic
from pydantic import BaseModel
import json

class InvoiceData(BaseModel):
    vendor: str
    invoice_number: str
    date: str
    total_amount: float
    items: list[dict]

def extract_invoice_data(invoice_text: str) -> InvoiceData:
    client = Anthropic()

    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{
            "role": "user",
            "content": f"""Extrais les informations de cette facture au format JSON.

Format attendu:
{{
    "vendor": "Nom du fournisseur",
    "invoice_number": "Numéro de facture",
    "date": "YYYY-MM-DD",
    "total_amount": 0.00,
    "items": [
        {{"description": "...", "quantity": 0, "unit_price": 0.00, "total": 0.00}}
    ]
}}

FACTURE:
{invoice_text}

JSON:"""
        }]
    )

    data = json.loads(response.content[0].text)
    return InvoiceData(**data)
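
In practice the model may wrap its answer in markdown fences, which would break json.loads. A small defensive parser, offered as a hedged sketch rather than part of the original example:

def parse_json_response(raw: str) -> dict:
    # Strip optional ```json ... ``` fences before parsing
    cleaned = raw.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.split("\n", 1)[1]    # drop the opening fence line
        cleaned = cleaned.rsplit("```", 1)[0]  # drop the closing fence
    return json.loads(cleaned)

# data = InvoiceData(**parse_json_response(response.content[0].text))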

Document summarization

from anthropic import Anthropic

def summarize_document(document: str, max_length: int = 200) -> str:
    client = Anthropic()

    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=max_length * 2,  # word budget -> rough token budget
        messages=[{
            "role": "user",
            "content": f"""Résume ce document en {max_length} mots maximum.
Conserve les informations essentielles et les chiffres clés.

DOCUMENT:
{document}

RÉSUMÉ:"""
        }]
    )

    return response.content[0].text

# For long documents: hierarchical summarization
def summarize_long_document(document: str, chunk_size: int = 4000) -> str:
    # Split into chunks
    chunks = [document[i:i+chunk_size] for i in range(0, len(document), chunk_size)]

    # Summarize each chunk
    summaries = [summarize_document(chunk, 100) for chunk in chunks]

    # Summarize the summaries
    combined = "\n\n".join(summaries)
    return summarize_document(combined, 200)

Document classification

from transformers import pipeline

# With a pre-trained zero-shot model
classifier = pipeline(
    "zero-shot-classification",
    model="facebook/bart-large-mnli"
)

def classify_document(text: str, categories: list[str]) -> dict:
    result = classifier(text, categories, multi_label=True)
    return {
        label: score
        for label, score in zip(result['labels'], result['scores'])
    }

# Example
categories = ["Legal", "Finance", "HR", "Technical", "Sales"]
result = classify_document(
    "Permanent employment contract...",
    categories
)
# {'Legal': 0.89, 'HR': 0.76, ...}

Content generation

Article writing

from anthropic import Anthropic

def generate_article(topic: str, style: str, length: int) -> str:
    client = Anthropic()

    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=length * 2,  # words-to-tokens margin
        messages=[{
            "role": "user",
            "content": f"""Rédige un article de blog sur le sujet suivant.

SUJET: {topic}
STYLE: {style}
LONGUEUR: environ {length} mots

Structure:
- Titre accrocheur
- Introduction
- 3-4 sections avec sous-titres
- Conclusion avec call-to-action

ARTICLE:"""
        }]
    )

    return response.content[0].text

Code generation

from anthropic import Anthropic

def generate_code(description: str, language: str = "python") -> str:
    client = Anthropic()

    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=2048,
        messages=[{
            "role": "user",
            "content": f"""Génère du code {language} pour la fonctionnalité suivante.

DESCRIPTION: {description}

Inclus:
- Code complet et fonctionnel
- Commentaires explicatifs
- Gestion des erreurs
- Exemple d'utilisation

CODE:"""
        }]
    )

    return response.content[0].text

def explain_code(source_code: str) -> str:
    client = Anthropic()

    prompt = f"""Explique ce code de manière détaillée.

CODE:
{source_code}

Explique:
1. Ce que fait le code
2. Comment il fonctionne
3. Les concepts clés utilisés

EXPLICATION:"""

    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}]
    )

    return response.content[0].text

Computer vision

Image classification

import torch
from torchvision import transforms, models
from PIL import Image

# Load a pre-trained model
model = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
model.eval()  # inference mode

# Preprocessing
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
])

def classify_image(image_path: str) -> list[tuple[str, float]]:
    # Load and preprocess (convert to RGB so the 3-channel normalization always applies)
    image = Image.open(image_path).convert("RGB")
    input_tensor = preprocess(image).unsqueeze(0)

    # Prediction
    with torch.no_grad():
        output = model(input_tensor)
        probabilities = torch.nn.functional.softmax(output[0], dim=0)

    # Top 5 predictions
    top5_prob, top5_idx = torch.topk(probabilities, 5)

    # Load the ImageNet class labels (imagenet_classes.txt must be available locally)
    with open("imagenet_classes.txt") as f:
        categories = [line.strip() for line in f.readlines()]

    return [
        (categories[idx], prob.item())
        for prob, idx in zip(top5_prob, top5_idx)
    ]
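
A quick usage sketch (the image path is a placeholder):

predictions = classify_image("photo.jpg")  # hypothetical image file
for label, probability in predictions:
    print(f"{label}: {probability:.1%}")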

Object detection

from transformers import DetrImageProcessor, DetrForObjectDetection
from PIL import Image
import torch

# Load the DETR model
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

def detect_objects(image_path: str, threshold: float = 0.9) -> list[dict]:
    image = Image.open(image_path).convert("RGB")

    # Preprocess
    inputs = processor(images=image, return_tensors="pt")

    # Prediction
    with torch.no_grad():
        outputs = model(**inputs)

    # Post-processing
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=threshold
    )[0]

    detections = []
    for score, label, box in zip(
        results["scores"], results["labels"], results["boxes"]
    ):
        detections.append({
            "label": model.config.id2label[label.item()],
            "confidence": score.item(),
            "box": box.tolist()  # [x1, y1, x2, y2]
        })

    return detections
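
To inspect the results visually, a short sketch (file names are placeholders) that draws the returned boxes with Pillow:

from PIL import Image, ImageDraw

def draw_detections(image_path: str, output_path: str = "detections.png"):
    # Draw each detected box and its label on a copy of the image
    image = Image.open(image_path).convert("RGB")
    draw = ImageDraw.Draw(image)
    for det in detect_objects(image_path):
        x1, y1, x2, y2 = [int(v) for v in det["box"]]
        draw.rectangle([x1, y1, x2, y2], outline="red", width=3)
        draw.text((x1, max(0, y1 - 12)), f"{det['label']} {det['confidence']:.2f}", fill="red")
    image.save(output_path)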

OCR (text recognition)

import pytesseract
from PIL import Image
import cv2
import numpy as np

def extract_text_from_image(image_path: str) -> str:
    # Load the image
    image = cv2.imread(image_path)

    # Preprocessing (grayscale, denoising, Otsu binarization)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    denoised = cv2.fastNlMeansDenoising(gray)
    _, binary = cv2.threshold(denoised, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # OCR
    text = pytesseract.image_to_string(binary, lang='fra')  # French language pack

    return text.strip()

# With word positions
def extract_text_with_positions(image_path: str) -> list[dict]:
    image = Image.open(image_path)
    data = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT)

    words = []
    for i, word in enumerate(data['text']):
        if word.strip():
            words.append({
                'text': word,
                'x': data['left'][i],
                'y': data['top'][i],
                'width': data['width'][i],
                'height': data['height'][i],
                'confidence': data['conf'][i]
            })

    return words

Data analysis

Time series forecasting

import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import torch
import torch.nn as nn

class LSTMPredictor(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        out = self.fc(out[:, -1, :])
        return out

def prepare_sequences(data, seq_length):
    X, y = [], []
    for i in range(len(data) - seq_length):
        X.append(data[i:i+seq_length])
        y.append(data[i+seq_length])
    return np.array(X), np.array(y)

# Training
# `data`: 1-D NumPy array containing the raw series
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(data.reshape(-1, 1))
X, y = prepare_sequences(scaled_data, seq_length=30)

model = LSTMPredictor(input_size=1, hidden_size=50, num_layers=2, output_size=1)
# ... training loop (see the sketch below)
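
A minimal training loop to fill in the elided step, continuing the snippet above; the loss, optimizer, and epoch count are illustrative assumptions:

X_tensor = torch.tensor(X, dtype=torch.float32)  # shape (N, 30, 1)
y_tensor = torch.tensor(y, dtype=torch.float32)  # shape (N, 1)

criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(50):  # illustrative epoch count
    model.train()
    optimizer.zero_grad()
    loss = criterion(model(X_tensor), y_tensor)
    loss.backward()
    optimizer.step()

# Forecast the next point, mapped back to the original scale
model.eval()
with torch.no_grad():
    next_scaled = model(X_tensor[-1:])
next_value = scaler.inverse_transform(next_scaled.numpy())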

Anomaly detection

from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
import pandas as pd

class AnomalyDetector:
    def __init__(self, contamination: float = 0.1):
        self.scaler = StandardScaler()
        self.model = IsolationForest(
            contamination=contamination,
            random_state=42
        )

    def fit(self, data: pd.DataFrame):
        scaled = self.scaler.fit_transform(data)
        self.model.fit(scaled)
        return self

    def detect(self, data: pd.DataFrame) -> pd.DataFrame:
        scaled = self.scaler.transform(data)

        # -1 = anomaly, 1 = normal
        predictions = self.model.predict(scaled)
        scores = self.model.decision_function(scaled)

        data = data.copy()
        data['is_anomaly'] = predictions == -1
        data['anomaly_score'] = scores

        return data

# Usage
detector = AnomalyDetector(contamination=0.05)
detector.fit(training_data)
results = detector.detect(new_data)
anomalies = results[results['is_anomaly']]

Customer segmentation

from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import pandas as pd

def segment_customers(data: pd.DataFrame, n_segments: int = 5) -> pd.DataFrame:
    # RFM features used for segmentation
    features = ['recency', 'frequency', 'monetary']

    # Standardization
    scaler = StandardScaler()
    scaled = scaler.fit_transform(data[features])

    # Clustering
    kmeans = KMeans(n_clusters=n_segments, random_state=42)
    data['segment'] = kmeans.fit_predict(scaled)

    # Segment profiles
    segment_analysis = data.groupby('segment')[features].mean()
    print("Segment profiles:")
    print(segment_analysis)

    return data

# RFM computation
def calculate_rfm(transactions: pd.DataFrame) -> pd.DataFrame:
    today = transactions['date'].max()

    rfm = transactions.groupby('customer_id').agg({
        'date': lambda x: (today - x.max()).days,  # Recency
        'order_id': 'count',  # Frequency
        'amount': 'sum'  # Monetary
    })

    rfm.columns = ['recency', 'frequency', 'monetary']
    return rfm
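
Chaining the two helpers into an end-to-end pipeline, a short sketch assuming a transactions DataFrame with customer_id, order_id, date, and amount columns (loaded here from a hypothetical CSV file):

transactions = pd.read_csv("transactions.csv", parse_dates=["date"])  # hypothetical file

rfm = calculate_rfm(transactions)
segmented = segment_customers(rfm, n_segments=4)

# Customers per segment
print(segmented["segment"].value_counts())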

Natural language processing

Sentiment analysis

from transformers import pipeline

# Multilingual model (predicts a 1-5 star rating)
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="nlptown/bert-base-multilingual-uncased-sentiment"
)

def analyze_sentiment(texts: list[str]) -> list[dict]:
    results = sentiment_analyzer(texts)

    return [
        {
            "text": text,
            "sentiment": "positive" if int(r['label'][0]) >= 4 else
                        "negative" if int(r['label'][0]) <= 2 else "neutral",
            "stars": int(r['label'][0]),
            "confidence": r['score']
        }
        for text, r in zip(texts, results)
    ]

# Customer review analysis (French-language examples; the model is multilingual)
reviews = [
    "Produit excellent, livraison rapide !",
    "Très déçu, ne fonctionne pas.",
    "Correct pour le prix."
]
results = analyze_sentiment(reviews)

Named entity recognition (NER)

from transformers import pipeline

ner = pipeline("ner", model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")

def extract_entities(text: str) -> dict:
    entities = ner(text)

    grouped = {}
    for entity in entities:
        entity_type = entity['entity_group']
        if entity_type not in grouped:
            grouped[entity_type] = []
        grouped[entity_type].append({
            'text': entity['word'],
            'score': entity['score']
        })

    return grouped

# Example (French input for the French NER model)
text = "Emmanuel Macron a rencontré Angela Merkel à Paris le 15 janvier."
entities = extract_entities(text)
# Approximate output:
# {
#   'PER': [{'text': 'Emmanuel Macron', 'score': 0.99}, {'text': 'Angela Merkel', 'score': 0.98}],
#   'LOC': [{'text': 'Paris', 'score': 0.97}]
# }

Machine translation

from transformers import pipeline

# EN -> FR translation
translator_en_fr = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")

# FR -> EN translation
translator_fr_en = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en")

def translate(text: str, source: str = "en", target: str = "fr") -> str:
    if source == "en" and target == "fr":
        result = translator_en_fr(text, max_length=512)
    elif source == "fr" and target == "en":
        result = translator_fr_en(text, max_length=512)
    else:
        raise ValueError(f"Translation {source} -> {target} not supported")

    return result[0]['translation_text']

# For long texts
def translate_long_text(text: str, source: str, target: str) -> str:
    # Split into paragraphs
    paragraphs = text.split('\n\n')

    # Translate each paragraph
    translated = [translate(p, source, target) for p in paragraphs if p.strip()]

    return '\n\n'.join(translated)

Automation

Approval workflow

from anthropic import Anthropic
from enum import Enum
import json

class Decision(Enum):
    APPROVE = "approve"
    REJECT = "reject"
    ESCALATE = "escalate"

def automated_approval(request: dict, rules: str) -> dict:
    client = Anthropic()

    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=500,
        messages=[{
            "role": "user",
            "content": f"""Analyse cette demande et décide de l'action à prendre.

RÈGLES D'APPROBATION:
{rules}

DEMANDE:
{request}

Réponds au format JSON:
{{
    "decision": "approve|reject|escalate",
    "reason": "Explication de la décision",
    "confidence": 0.0-1.0
}}

DÉCISION:"""
        }]
    )

    result = json.loads(response.content[0].text)
    return result

# Example
rules = """
- Automatically approve if the amount is under €1,000
- Reject if the vendor is blacklisted
- Escalate if the amount exceeds €10,000 or the request is unusual
"""

request = {
    "type": "expense",
    "amount": 500,
    "vendor": "Office Supplies Inc",
    "description": "Fournitures de bureau"
}

decision = automated_approval(request, rules)

Research agent

from anthropic import Anthropic

class ResearchAgent:
    def __init__(self):
        self.client = Anthropic()
        self.findings = []

    def research(self, topic: str, depth: int = 3) -> str:
        # Generate the research questions
        questions = self._generate_questions(topic)

        # Research each question
        for question in questions[:depth]:
            finding = self._search_and_analyze(question)
            self.findings.append(finding)

        # Final synthesis
        return self._synthesize()

    def _generate_questions(self, topic: str) -> list[str]:
        response = self.client.messages.create(
            model="claude-sonnet-4-20250514",
            max_tokens=500,
            messages=[{
                "role": "user",
                "content": f"Génère 5 questions de recherche sur : {topic}"
            }]
        )
        return [q.strip() for q in response.content[0].text.split('\n') if q.strip()]

    def _search_and_analyze(self, question: str) -> str:
        # Placeholder: plug in a search API here, then analyze the results
        return f"Findings for: {question}"

    def _synthesize(self) -> str:
        response = self.client.messages.create(
            model="claude-sonnet-4-20250514",
            max_tokens=1000,
            messages=[{
                "role": "user",
                "content": f"Synthétise ces découvertes:\n{self.findings}"
            }]
        )
        return response.content[0].text
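
A usage sketch (the topic is a placeholder, and _search_and_analyze still needs to be wired to a real search backend):

agent = ResearchAgent()
report = agent.research("Impact of LLMs on customer support", depth=3)
print(report)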

Use case summary

Domain           | Applications                               | Technologies
Customer service | Chatbots, automated FAQ                    | LLM, RAG
Documents        | Extraction, summarization, classification  | NLP, LLM
Content          | Writing, translation, code                 | LLM
Vision           | Classification, detection, OCR             | CNN, Transformers
Data             | Forecasting, anomalies, segmentation       | Classical ML, DL
Language         | Sentiment, NER, translation                | NLP, Transformers
Automation       | Workflows, agents                          | LLM, orchestration
