{
  "metadata": {
    "title": "AI and Cloud Computing in Business Systems: A Hybrid Model for Enhancing Enterprise Resource Planning",
    "short_title": "AI + Cloud ERP Hybrid Model",
    "authors": [
      "Md Anisur Rahman Chowdhury",
      "Khandakar Rabbi Ahmed",
      "Kefei Wang",
      "Sabrina Mohona",
      "Shahriar Alam Robin",
      "Shah Tawkir Nesar"
    ],
    "publication": {
      "venue": "2025 9th International Conference on Computational Systems and Information Technology for Sustainable Solutions (CSITSS)",
      "year": 2025,
      "publication_date": "2025-11-20",
      "pages": "1-6",
      "ieee_xplore_url": "https://ieeexplore.ieee.org/abstract/document/11295090",
      "google_scholar_url": "https://scholar.google.com/citations?view_op=view_citation&hl=en&user=NQyywPoAAAAJ&citation_for_view=NQyywPoAAAAJ:iH-uZ7U-co4C",
      "youtube_url": "https://youtu.be/sy4MUNUD4C8",
      "ieee_document_id": "11295090",
      "metadata_note": "Conference and publication metadata are aligned to the external IEEE indexing record. The LaTeX source bundle includes a stale first-page header from an earlier template.",
      "metadata_sources": [
        "User-supplied publication record in prompt.md",
        "J-GLOBAL index listing for the paper title and IEEE document record"
      ]
    },
    "problem": "Traditional ERP systems struggle with flexibility, real-time insight, and scalable decision support when inter-module dependencies become complex.",
    "objective": "Use a hybrid BiLSTM-Attention model with cloud-aware integration concepts to predict ERP module relationships and support modular optimization.",
    "ai_component": "BiLSTM sequence modeling plus attention-based contextual weighting for ERP module dependency prediction.",
    "cloud_component": "Cloud-native integration pathway for dynamic resource allocation, fault tolerance, autoscaling, and microservice-oriented inference.",
    "erp_relevance": "The method targets load balancing, intelligent decision support, risk management, and module-level optimization in enterprise business systems.",
    "results_summary": "The proposed model reached 91.2% accuracy, 89.5% precision, 90.8% recall, and 90.1% F1-score, outperforming logistic regression, random forest, SVM, and GRU baselines.",
    "limitations": "The paper reports conceptual cloud integration and experimental validation, but not a live deployment. Exact raw training logs and the original ERP dataset are not included in the repository."
  },
  "baseline_metrics": [
    {
      "model": "Logistic Regression",
      "family": "Traditional ML",
      "precision": 84.0,
      "recall": 79.5,
      "f1_score": 81.7,
      "accuracy": 82.7
    },
    {
      "model": "Random Forest",
      "family": "Traditional ML",
      "precision": 87.5,
      "recall": 83.0,
      "f1_score": 85.2,
      "accuracy": 85.3
    },
    {
      "model": "Support Vector Machine",
      "family": "Traditional ML",
      "precision": 80.2,
      "recall": 77.0,
      "f1_score": 78.6,
      "accuracy": 79.4
    },
    {
      "model": "Gated Recurrent Unit",
      "family": "Sequence DL",
      "precision": 85.0,
      "recall": 86.5,
      "f1_score": 85.7,
      "accuracy": 83.2
    },
    {
      "model": "BiLSTM-Attention",
      "family": "Proposed",
      "precision": 89.5,
      "recall": 90.8,
      "f1_score": 90.1,
      "accuracy": 91.2
    }
  ],
  "attention_scores": [
    {
      "module": "Module_1",
      "attention_score": 0.07,
      "interpretation": "Low importance; likely non-critical table"
    },
    {
      "module": "Module_2",
      "attention_score": 0.09,
      "interpretation": "Moderate relevance"
    },
    {
      "module": "Module_3",
      "attention_score": 0.05,
      "interpretation": "Minimal impact"
    },
    {
      "module": "Module_4",
      "attention_score": 0.13,
      "interpretation": "High dependency weight"
    },
    {
      "module": "Module_5",
      "attention_score": 0.17,
      "interpretation": "Most influential; likely a central or shared table"
    },
    {
      "module": "Module_6",
      "attention_score": 0.11,
      "interpretation": "Strong contextual role"
    },
    {
      "module": "Module_7",
      "attention_score": 0.04,
      "interpretation": "Negligible attention; possibly isolated module"
    },
    {
      "module": "Module_8",
      "attention_score": 0.12,
      "interpretation": "Important secondary dependency"
    },
    {
      "module": "Module_9",
      "attention_score": 0.1,
      "interpretation": "Supportive of prediction context"
    },
    {
      "module": "Module_10",
      "attention_score": 0.12,
      "interpretation": "High final-context relevance"
    }
  ],
  "hyperparameters": [
    {
      "parameter": "Embedding Dimension",
      "value": "128"
    },
    {
      "parameter": "LSTM Hidden Size",
      "value": "128"
    },
    {
      "parameter": "Attention Vector Size",
      "value": "64"
    },
    {
      "parameter": "Dropout Rate",
      "value": "0.3"
    },
    {
      "parameter": "Learning Rate",
      "value": "0.001"
    },
    {
      "parameter": "Optimizer",
      "value": "Adam"
    },
    {
      "parameter": "Epochs",
      "value": "50"
    },
    {
      "parameter": "Batch Size",
      "value": "32"
    },
    {
      "parameter": "Loss Function",
      "value": "Categorical Crossentropy"
    }
  ],
  "training_curves": {
    "series_note": "Epoch-level training series are visual reconstructions from the published paper figures because raw per-epoch logs are not included in the repository bundle.",
    "data": [
      {
        "epoch": 1,
        "training_accuracy": 82.7,
        "validation_accuracy": 80.3,
        "training_loss": 0.79,
        "validation_loss": 0.86
      },
      {
        "epoch": 2,
        "training_accuracy": 82.8,
        "validation_accuracy": 80.0,
        "training_loss": 0.77,
        "validation_loss": 0.87
      },
      {
        "epoch": 3,
        "training_accuracy": 82.8,
        "validation_accuracy": 81.2,
        "training_loss": 0.78,
        "validation_loss": 0.84
      },
      {
        "epoch": 4,
        "training_accuracy": 83.0,
        "validation_accuracy": 79.3,
        "training_loss": 0.76,
        "validation_loss": 0.84
      },
      {
        "epoch": 5,
        "training_accuracy": 84.1,
        "validation_accuracy": 81.4,
        "training_loss": 0.75,
        "validation_loss": 0.83
      },
      {
        "epoch": 6,
        "training_accuracy": 83.8,
        "validation_accuracy": 81.2,
        "training_loss": 0.72,
        "validation_loss": 0.82
      },
      {
        "epoch": 7,
        "training_accuracy": 84.0,
        "validation_accuracy": 82.0,
        "training_loss": 0.74,
        "validation_loss": 0.81
      },
      {
        "epoch": 8,
        "training_accuracy": 85.1,
        "validation_accuracy": 81.8,
        "training_loss": 0.65,
        "validation_loss": 0.79
      },
      {
        "epoch": 9,
        "training_accuracy": 84.8,
        "validation_accuracy": 82.7,
        "training_loss": 0.67,
        "validation_loss": 0.81
      },
      {
        "epoch": 10,
        "training_accuracy": 84.7,
        "validation_accuracy": 82.4,
        "training_loss": 0.66,
        "validation_loss": 0.76
      },
      {
        "epoch": 11,
        "training_accuracy": 84.8,
        "validation_accuracy": 82.5,
        "training_loss": 0.64,
        "validation_loss": 0.78
      },
      {
        "epoch": 12,
        "training_accuracy": 83.9,
        "validation_accuracy": 81.9,
        "training_loss": 0.65,
        "validation_loss": 0.75
      },
      {
        "epoch": 13,
        "training_accuracy": 85.8,
        "validation_accuracy": 83.2,
        "training_loss": 0.6,
        "validation_loss": 0.74
      },
      {
        "epoch": 14,
        "training_accuracy": 86.0,
        "validation_accuracy": 83.1,
        "training_loss": 0.62,
        "validation_loss": 0.73
      },
      {
        "epoch": 15,
        "training_accuracy": 86.3,
        "validation_accuracy": 83.5,
        "training_loss": 0.59,
        "validation_loss": 0.71
      },
      {
        "epoch": 16,
        "training_accuracy": 85.2,
        "validation_accuracy": 83.8,
        "training_loss": 0.58,
        "validation_loss": 0.74
      },
      {
        "epoch": 17,
        "training_accuracy": 87.0,
        "validation_accuracy": 84.1,
        "training_loss": 0.57,
        "validation_loss": 0.74
      },
      {
        "epoch": 18,
        "training_accuracy": 87.9,
        "validation_accuracy": 84.5,
        "training_loss": 0.56,
        "validation_loss": 0.7
      },
      {
        "epoch": 19,
        "training_accuracy": 87.2,
        "validation_accuracy": 84.8,
        "training_loss": 0.56,
        "validation_loss": 0.68
      },
      {
        "epoch": 20,
        "training_accuracy": 87.8,
        "validation_accuracy": 84.9,
        "training_loss": 0.55,
        "validation_loss": 0.66
      },
      {
        "epoch": 21,
        "training_accuracy": 87.9,
        "validation_accuracy": 85.1,
        "training_loss": 0.55,
        "validation_loss": 0.67
      },
      {
        "epoch": 22,
        "training_accuracy": 87.6,
        "validation_accuracy": 84.8,
        "training_loss": 0.55,
        "validation_loss": 0.66
      },
      {
        "epoch": 23,
        "training_accuracy": 87.6,
        "validation_accuracy": 85.0,
        "training_loss": 0.5,
        "validation_loss": 0.65
      },
      {
        "epoch": 24,
        "training_accuracy": 88.2,
        "validation_accuracy": 86.4,
        "training_loss": 0.47,
        "validation_loss": 0.68
      },
      {
        "epoch": 25,
        "training_accuracy": 89.5,
        "validation_accuracy": 85.2,
        "training_loss": 0.47,
        "validation_loss": 0.62
      },
      {
        "epoch": 26,
        "training_accuracy": 89.2,
        "validation_accuracy": 86.4,
        "training_loss": 0.46,
        "validation_loss": 0.64
      },
      {
        "epoch": 27,
        "training_accuracy": 89.7,
        "validation_accuracy": 85.6,
        "training_loss": 0.46,
        "validation_loss": 0.61
      },
      {
        "epoch": 28,
        "training_accuracy": 89.4,
        "validation_accuracy": 85.9,
        "training_loss": 0.45,
        "validation_loss": 0.59
      },
      {
        "epoch": 29,
        "training_accuracy": 89.3,
        "validation_accuracy": 86.6,
        "training_loss": 0.41,
        "validation_loss": 0.58
      },
      {
        "epoch": 30,
        "training_accuracy": 90.6,
        "validation_accuracy": 87.2,
        "training_loss": 0.44,
        "validation_loss": 0.56
      },
      {
        "epoch": 31,
        "training_accuracy": 90.7,
        "validation_accuracy": 86.7,
        "training_loss": 0.43,
        "validation_loss": 0.55
      },
      {
        "epoch": 32,
        "training_accuracy": 89.8,
        "validation_accuracy": 87.8,
        "training_loss": 0.39,
        "validation_loss": 0.55
      },
      {
        "epoch": 33,
        "training_accuracy": 91.7,
        "validation_accuracy": 87.5,
        "training_loss": 0.33,
        "validation_loss": 0.54
      },
      {
        "epoch": 34,
        "training_accuracy": 91.8,
        "validation_accuracy": 88.0,
        "training_loss": 0.4,
        "validation_loss": 0.51
      },
      {
        "epoch": 35,
        "training_accuracy": 91.0,
        "validation_accuracy": 87.7,
        "training_loss": 0.34,
        "validation_loss": 0.5
      },
      {
        "epoch": 36,
        "training_accuracy": 91.6,
        "validation_accuracy": 88.1,
        "training_loss": 0.37,
        "validation_loss": 0.5
      },
      {
        "epoch": 37,
        "training_accuracy": 91.4,
        "validation_accuracy": 87.8,
        "training_loss": 0.33,
        "validation_loss": 0.47
      },
      {
        "epoch": 38,
        "training_accuracy": 91.0,
        "validation_accuracy": 87.9,
        "training_loss": 0.32,
        "validation_loss": 0.48
      },
      {
        "epoch": 39,
        "training_accuracy": 92.6,
        "validation_accuracy": 88.4,
        "training_loss": 0.29,
        "validation_loss": 0.47
      },
      {
        "epoch": 40,
        "training_accuracy": 93.2,
        "validation_accuracy": 88.2,
        "training_loss": 0.33,
        "validation_loss": 0.48
      },
      {
        "epoch": 41,
        "training_accuracy": 93.6,
        "validation_accuracy": 88.1,
        "training_loss": 0.36,
        "validation_loss": 0.43
      },
      {
        "epoch": 42,
        "training_accuracy": 92.5,
        "validation_accuracy": 89.5,
        "training_loss": 0.34,
        "validation_loss": 0.46
      },
      {
        "epoch": 43,
        "training_accuracy": 92.6,
        "validation_accuracy": 89.9,
        "training_loss": 0.32,
        "validation_loss": 0.41
      },
      {
        "epoch": 44,
        "training_accuracy": 93.7,
        "validation_accuracy": 89.6,
        "training_loss": 0.28,
        "validation_loss": 0.39
      },
      {
        "epoch": 45,
        "training_accuracy": 93.7,
        "validation_accuracy": 90.2,
        "training_loss": 0.25,
        "validation_loss": 0.4
      },
      {
        "epoch": 46,
        "training_accuracy": 93.4,
        "validation_accuracy": 90.1,
        "training_loss": 0.24,
        "validation_loss": 0.37
      },
      {
        "epoch": 47,
        "training_accuracy": 94.6,
        "validation_accuracy": 89.2,
        "training_loss": 0.24,
        "validation_loss": 0.38
      },
      {
        "epoch": 48,
        "training_accuracy": 93.7,
        "validation_accuracy": 90.1,
        "training_loss": 0.23,
        "validation_loss": 0.41
      },
      {
        "epoch": 49,
        "training_accuracy": 95.0,
        "validation_accuracy": 90.7,
        "training_loss": 0.22,
        "validation_loss": 0.37
      },
      {
        "epoch": 50,
        "training_accuracy": 95.5,
        "validation_accuracy": 91.4,
        "training_loss": 0.21,
        "validation_loss": 0.35
      }
    ]
  }
}
