import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import joblib
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score, RandomizedSearchCV
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
classification_report, confusion_matrix, ConfusionMatrixDisplay,
roc_curve, auc, precision_recall_curve, average_precision_score
)
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
# === Data loading and preprocessing ===
df = pd.read_json("TelecomX_Data.json")
# Keep only rows with a definite churn label, then binarize the target.
df = df[df['Churn'].isin(['Yes', 'No'])].copy()
df['Churn'] = df['Churn'].map({'No': 0, 'Yes': 1})
# Total charges arrive as strings; non-numeric entries become NaN and are
# imputed later inside the model pipeline.
df['account.Charges.Total'] = pd.to_numeric(df['account.Charges.Total'], errors='coerce')
df.drop(columns=['customerID'], inplace=True)  # pure identifier, no signal
df = pd.get_dummies(df, drop_first=True)  # one-hot encode categoricals

# === Correlation-based feature selection ===
# Rank features by absolute correlation with the target and keep the top 10.
# iloc[1:11] skips position 0, which is 'Churn' itself (|corr| == 1.0).
# NOTE(review): selecting features on the full dataset before cross-validation
# leaks target information into the folds — confirm this is acceptable here.
top_features = df.corr()['Churn'].abs().sort_values(ascending=False).iloc[1:11].index.tolist()
X, y = df[top_features], df['Churn']
# === Pipeline: mean imputation + SMOTE oversampling + random forest ===
# imblearn's Pipeline applies the SMOTE step only during fit, so evaluation
# data is never synthetically oversampled.
pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy='mean')),     # fills NaN (e.g. Charges.Total)
    ('smote', SMOTE(random_state=42)),               # rebalances churn classes on the training data
    ('rf', RandomForestClassifier(random_state=42))  # final classifier
])
# === Randomized hyperparameter search ===
# Keys use the 'rf__' prefix to target the forest step inside the pipeline.
param_dist = {
    'rf__n_estimators': [100, 300, 500],
    'rf__max_depth': [10, 30, None],
    'rf__min_samples_split': [2, 5],
    'rf__min_samples_leaf': [1, 2],
    'rf__max_features': ['sqrt', 'log2']
}
# The grid has 3*3*2*2*2 = 72 combinations; n_iter=30 samples ~40% of them.
# F1 is used as the selection metric because the classes are imbalanced.
search = RandomizedSearchCV(pipeline, param_distributions=param_dist, n_iter=30,
                            scoring='f1', cv=5, random_state=42, n_jobs=-1, verbose=0)
search.fit(X, y)
best_pipeline = search.best_estimator_
# === Cross-validated performance of the tuned pipeline ===
# Each cross_val_score call refits the pipeline per fold; SMOTE therefore
# runs on training folds only, never on the validation fold.
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for metric in ['accuracy', 'precision', 'recall', 'f1']:
    scores = cross_val_score(best_pipeline, X, y, cv=cv, scoring=metric)
    print(f"{metric.capitalize():<10}: Média = {scores.mean():.4f} | Desvio = {scores.std():.4f}")
# === Final evaluation on a held-out test set, with diagnostic plots ===
# Stratified split preserves the churn class ratio in train and test.
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.2, random_state=42)
best_pipeline.fit(X_train, y_train)
y_pred = best_pipeline.predict(X_test)
y_prob = best_pipeline.predict_proba(X_test)[:, 1]  # probability of the positive (churn) class

# --- Confusion matrix ---
ConfusionMatrixDisplay(confusion_matrix(y_test, y_pred)).plot(cmap="Blues")
plt.title("Matriz de Confusão")
plt.show()

# --- ROC curve ---
fpr, tpr, _ = roc_curve(y_test, y_prob)
plt.plot(fpr, tpr, label=f"AUC = {auc(fpr, tpr):.2f}")
plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.title("Curva ROC")
plt.legend()
plt.grid(True)
plt.show()

# --- Precision-recall curve ---
precision, recall, _ = precision_recall_curve(y_test, y_prob)
plt.plot(recall, precision, label=f"AP = {average_precision_score(y_test, y_prob):.2f}")
plt.xlabel("Recall")
plt.ylabel("Precisão")
plt.title("Curva Precision-Recall")
plt.legend()
plt.grid(True)
plt.show()
# === Feature importances from the tuned random forest ===
rf_model = best_pipeline.named_steps['rf']
# Column order is preserved through the imputer/SMOTE steps, so X.columns
# aligns positionally with feature_importances_.
feat_imp = pd.DataFrame({'Feature': X.columns, 'Importance': rf_model.feature_importances_})
feat_imp.sort_values(by='Importance', ascending=False, inplace=True)
sns.barplot(data=feat_imp, x='Importance', y='Feature', palette='viridis')
plt.title('Importância das Features')
plt.tight_layout()
plt.show()
# === Persist the fitted pipeline ===
# The whole pipeline (imputer + forest) is saved, so inference later only
# needs raw feature values — no separate preprocessing step.
joblib.dump(best_pipeline, 'modelo_churn_rf_pipeline.joblib')
print("Modelo salvo com sucesso!")