- Journal
- Preprints
- Conference
- Book Chapters
- Thesis
2021
79. | Public defibrillator accessibility and mobility trends during the COVID-19 pandemic in Canada. Journal Article, Forthcoming. K. H. B. Leung, R. Alam, S. C. Brooks, T. C. Y. Chan. Resuscitation, Forthcoming (2021).
78. | OpenKBP: The Open-Access Knowledge-Based Planning Grand Challenge. Journal Article, Forthcoming. A. Babier, B. Zhang, R. Mahmood, K. L. Moore, T. G. Purdie, A. L. McNiven, T. C. Y. Chan. Medical Physics, Forthcoming (2021).
2020
77. | Machine learning-based risk stratification for early detection of diabetes and hypertension in resource-limited settings. Journal Article, Forthcoming. J. J. Boutilier, T. C. Y. Chan, M. Ranjan, S. Deo. Journal of Medical Internet Research, Forthcoming (2020).
76. | Predicting depression from hearing loss using machine learning. Journal Article, Forthcoming. M. G. Crowson, K. H. Franck, L. C. Rosella, T. C. Y. Chan. Ear and Hearing, Forthcoming (2020).
75. | An ensemble learning framework for model fitting and evaluation in inverse linear optimization. Journal Article, Forthcoming. A. Babier, T. C. Y. Chan, T. Lee, R. Mahmood, D. Terekhov. INFORMS Journal on Optimization, Forthcoming (2020). http://arxiv.org/abs/1804.04576
We develop a generalized inverse optimization framework for fitting the cost vector of a single linear optimization problem given an ensemble of observed decisions. We unify multiple variants in the inverse optimization literature under a common template and derive assumption-free and exact solution methods for each variant. We extend a goodness-of-fit metric previously introduced for the problem with a single observed decision to this new setting, proving and numerically demonstrating several important properties. Finally, to illustrate our framework, we develop a novel inverse optimization-driven procedure for automated radiation therapy treatment planning. Here, the inverse optimization model leverages the combined power of an ensemble of dose predictions produced by different machine learning models to construct clinical treatment plans that better trade off between the competing clinical objectives that are used for plan evaluation in practice.
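For intuition about the setting in the entry above: a common aggregate-duality-gap variant of inverse linear optimization (illustrative notation only, not necessarily the paper's exact formulation) imputes a cost vector c for the forward problem min { c^T x : Ax >= b } from observed decisions \hat{x}_1, ..., \hat{x}_K by solving

\[
\min_{c,\, y_1, \dots, y_K} \;\; \sum_{k=1}^{K} \left( c^{\top}\hat{x}_k - b^{\top} y_k \right)
\quad \text{s.t.} \quad
A^{\top} y_k = c,\;\; y_k \ge 0 \;\; (k = 1, \dots, K), \qquad \|c\|_1 = 1.
\]

By weak duality each term c^{\top}\hat{x}_k - b^{\top} y_k is nonnegative, so the objective measures the total suboptimality of the observed decisions under the imputed cost, and the normalization rules out the trivial solution c = 0; exact solution methods for variants of this type are what the paper develops.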
74. | Sampling from the complement of a polyhedron: An MCMC algorithm for data augmentation. Journal Article. T. C. Y. Chan, A. Diamant, R. Mahmood. Operations Research Letters, Vol. 48, pp. 744–751, 2020. doi: 10.1016/j.orl.2020.08.014. PDF: https://chanlab.mie.utoronto.ca/wp-content/uploads/2020/09/74-2020-Chan-ORL-MCMCalgorithm.pdf
We present an MCMC algorithm for sampling from the complement of a polyhedron. Our approach is based on the Shake-and-bake algorithm for sampling from the boundary of a set and provably covers the complement. We use this algorithm for data augmentation in a machine learning task of classifying a hidden feasible set in a data-driven optimization pipeline. Numerical results on simulated and MIPLIB instances demonstrate that our algorithm, along with a supervised learning technique, outperforms conventional unsupervised baselines.
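A rough way to see what "sampling from the complement of a polyhedron" means is the naive rejection baseline below. It is for illustration only and is not the paper's Shake-and-bake-based MCMC algorithm; the function name and bounding box are assumptions.

```python
import numpy as np

def sample_complement(A, b, lo, hi, n_samples, seed=None):
    """Draw points uniformly from the box [lo, hi]^n and keep those that
    violate at least one constraint of the polyhedron {x : A x <= b}."""
    rng = np.random.default_rng(seed)
    samples = []
    while len(samples) < n_samples:
        x = rng.uniform(lo, hi, size=A.shape[1])
        if np.any(A @ x > b):  # point lies outside the polyhedron
            samples.append(x)
    return np.array(samples)

# Example: complement of the unit square {0 <= x <= 1}^2 inside the box [-1, 2]^2
A = np.vstack([np.eye(2), -np.eye(2)])
b = np.array([1.0, 1.0, 0.0, 0.0])
pts = sample_complement(A, b, lo=-1.0, hi=2.0, n_samples=500, seed=0)
```

Rejection sampling of this kind degrades quickly as the dimension grows, which is part of the appeal of a boundary-based MCMC approach like the one studied in the paper.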
73. | Modelling resource requirements and physician staffing to provide virtual urgent medical care for residents of long-term care homes: a cross-sectional study. Journal Article. F. Razak, S. Shin, F. Pogacar, H. Y. Jung, L. Pus, A. Moser, L. Lapointe-Shaw, T. Tang, J. L. Kwan, A. Weinerman, S. Rawal, V. Kushnir, D. Mak, D. Martin, K. G. Shojania, S. Bhatia, P. Agarwal, G. Mukerji, M. Fralick, M. K. Kapral, M. Morgan, B. Wong, T. C. Y. Chan, A. A. Verma. CMAJ Open, Vol. 8, pp. E514-E521, 2020. doi: 10.9778/cmajo.20200098
Background: The coronavirus disease 2019 (COVID-19) outbreak increases the importance of strategies to enhance urgent medical care delivery in long-term care (LTC) facilities that could potentially reduce transfers to emergency departments. The study objective was to model resource requirements to deliver virtual urgent medical care in LTC facilities. Methods: We used data from all general medicine inpatient admissions at 7 hospitals in the Greater Toronto Area, Ontario, Canada, over a 7.5-year period (Apr. 1, 2010, to Oct. 31, 2017) to estimate historical patterns of hospital resource use by LTC residents. We estimated an upper bound of potentially avoidable transfers by combining data on short admissions (≤ 72 h) with historical data on the proportion of transfers from LTC facilities for which patients were discharged from the emergency department without admission. Regression models were used to extrapolate future resource requirements, and queuing models were used to estimate physician staffing requirements to perform virtual assessments. Results: There were 235 375 admissions to general medicine wards, and residents of LTC facilities (age 16 yr or older) accounted for 9.3% (n = 21 948) of these admissions. Among the admissions of residents of LTC facilities, short admissions constituted 24.1% (n = 5297), and for 99.8% (n = 5284) of these admissions, the patient received laboratory testing, for 86.9% (n = 4604) the patient received plain radiography, for 41.5% (n = 2197) the patient received computed tomography and for 81.2% (n = 4300) the patient received intravenous medications. If all patients who have short admissions and are transferred from the emergency department were diverted to outpatient care, the average weekly demand for outpatient imaging per hospital would be 2.6 ultrasounds, 11.9 computed tomographic scans and 23.9 radiographs per week. The average daily volume of urgent medical virtual assessments would range from 2.0 to 5.8 per hospital. A single centralized virtual assessment centre staffed by 2 or 3 physicians would provide services similar in efficiency (measured by waiting time for physician assessment) to 7 separate centres staffed by 1 physician each. Interpretation: The provision of acute medical care to LTC residents at their facility would probably require rapid access to outpatient diagnostic imaging, within-facility access to laboratory services and intravenous medication and virtual consultations with physicians. The results of this study can inform efforts to deliver urgent medical care in LTC facilities in light of a potential surge in COVID-19 cases.
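The physician staffing estimates in the entry above come from queuing models. As a generic illustration (not the authors' exact model; the 30-minute assessment time and round-the-clock availability below are assumptions), the mean wait in an M/M/c queue can be computed with the Erlang-C formula:

```python
from math import factorial

def mmc_mean_wait(arrival_rate, service_rate, servers):
    """Mean time spent waiting in queue for an M/M/c system (Erlang C).
    Rates must share the same time unit; the result is in that unit."""
    a = arrival_rate / service_rate  # offered load
    if servers <= a:
        raise ValueError("unstable queue: need servers > offered load")
    top = (a ** servers / factorial(servers)) * (servers / (servers - a))
    p_wait = top / (sum(a ** k / factorial(k) for k in range(servers)) + top)
    return p_wait / (servers * service_rate - arrival_rate)

# Illustration: 5.8 virtual assessments/day (upper end reported in the abstract),
# assuming each assessment takes 30 minutes (48/day per physician).
print(mmc_mean_wait(arrival_rate=5.8, service_rate=48.0, servers=1))  # wait in days
```

The pooling effect visible in such models, where one shared queue with a few servers outperforms many single-server queues, is what underlies the abstract's comparison between one centralized centre and seven separate ones.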
72. | AutoAudio: Deep learning for automatic audiogram interpretation. Journal Article. M. G. Crowson, J. W. Lee, A. Hamour, R. Mahmood, V. Lin, D. L. Tucci, T. C. Y. Chan. Journal of Medical Systems, Vol. 44, Article No. 163, 2020. doi: 10.1007/s10916-020-01627-1
Hearing loss is the leading human sensory system loss, and one of the leading causes for years lived with disability, with significant effects on quality of life, social isolation, and overall health. Coupled with a forecast of increased hearing loss burden worldwide, national and international health organizations have urgently recommended that access to hearing evaluation be expanded to meet demand. The objective of this study was to develop ‘AutoAudio’ – a novel deep learning proof-of-concept model that accurately and quickly interprets diagnostic audiograms. Adult audiogram reports representing normal, conductive, mixed and sensorineural morphologies were used to train different neural network architectures. Image augmentation techniques were used to increase the training image set size. Classification accuracy on a separate test set was used to assess model performance. The architecture with the highest out-of-training set accuracy was ResNet-101 at 97.5%. Neural network training time varied between 2 and 7 h depending on the depth of the neural network architecture. Each neural network architecture produced misclassifications that arose from failures of the model to correctly label the audiogram with the appropriate hearing loss type. The most commonly misclassified hearing loss type was mixed loss. Re-engineering the process of hearing testing with a machine learning innovation may help enhance access for the growing worldwide population that is expected to require audiologist services. Our results suggest that deep learning may be a transformative technology that enables automatic and accurate audiogram interpretation.
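The entry above fine-tunes standard image classifiers such as ResNet-101 on audiogram images. A generic transfer-learning setup (a sketch, not the authors' code; the class count, learning rate, and helper name are assumptions) might look like:

```python
import torch
import torch.nn as nn
from torchvision import models

# Pretrained ResNet-101 (recent torchvision API) with a new 4-class head for
# normal, conductive, mixed and sensorineural audiogram morphologies.
model = models.resnet101(weights=models.ResNet101_Weights.IMAGENET1K_V1)
model.fc = nn.Linear(model.fc.in_features, 4)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

def train_step(images, labels):
    """One optimization step on a batch of audiogram images."""
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
    return loss.item()
```

Image augmentation, as mentioned in the abstract, would be applied in the data loader that feeds batches to train_step.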
71. | Effect of optimized versus guidelines-based automated external defibrillator placement on out-of-hospital cardiac arrest coverage: an in silico trial. Journal Article. C. L. F. Sun, L. Karlsson, L. J. Morrison, S. C. Brooks, F. Folke, T. C. Y. Chan. Journal of the American Heart Association, Vol. 9, Article No. e016701, 2020. doi: 10.1161/JAHA.120.016701
BACKGROUND: Mathematical optimization of automated external defibrillator (AED) placement may improve AED accessibility and out-of-hospital cardiac arrest (OHCA) outcomes compared with American Heart Association (AHA) and European Resuscitation Council (ERC) placement guidelines. We conducted an in silico trial (simulated prospective cohort study) comparing mathematically optimized placements with placements derived from current AHA and ERC guidelines, which recommend placement in locations where OHCAs are usually witnessed. METHODS AND RESULTS: We identified all public OHCAs of presumed cardiac cause from 2008 to 2016 in Copenhagen, Denmark. For the control, we computationally simulated placing 24/7-accessible AEDs at every unique, public, witnessed OHCA location at monthly intervals over the study period. The intervention consisted of an equal number of simulated AED placements, deployed monthly, at mathematically optimized locations, using a model that analyzed historical OHCAs before that month. For each approach, we calculated the number of OHCAs in the study period that occurred within a 100-m route distance (based on Copenhagen’s road network) of an available AED after it was placed (“OHCA coverage”). Estimated impact on bystander defibrillation and 30-day survival was calculated by multivariate logistic regression. The control scenario involved 393 AEDs at historical, public, witnessed OHCA locations, covering 15.8% of the 653 public OHCAs from 2008 to 2016. The optimized locations provided significantly higher coverage (24.2%; P<0.001). Estimated bystander defibrillation and 30-day survival rates increased from 15.6% to 18.2% (P<0.05) and from 32.6% to 34.0% (P<0.05), respectively. As a baseline, the 1573 real AEDs in Copenhagen covered 14.4% of the OHCAs. CONCLUSIONS: Mathematical optimization can significantly improve OHCA coverage and estimated clinical outcomes compared with a guidelines-based approach to AED placement.
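Placement optimization of the kind described above is typically cast as a maximal covering location problem. In a generic form (illustrative, not necessarily the exact model used in the study), let y_j = 1 if an AED is placed at candidate site j, z_i = 1 if historical arrest i is covered, N_i be the candidate sites within the 100-m route-distance radius of arrest i, and p be the number of AEDs to place:

\[
\max_{y, z} \; \sum_{i} z_i
\quad \text{s.t.} \quad
z_i \le \sum_{j \in N_i} y_j \;\; \forall i, \qquad
\sum_{j} y_j \le p, \qquad
y_j, z_i \in \{0, 1\}.
\]

The objective counts covered arrests, which corresponds to the "OHCA coverage" metric reported in the abstract.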
70. | Machine learning as a catalyst for value-based health care. Journal Article. M. G. Crowson, T. C. Y. Chan. Journal of Medical Systems, Vol. 44, Article No. 139, 2020. doi: 10.1007/s10916-020-01607-5
69. | Machine learning for pattern detection in cochlear implant FDA adverse event reports. Journal Article. M. G. Crowson, A. Hamour, V. Lin, J. M. Chen, T. C. Y. Chan. Cochlear Implants International, Vol. 21, pp. 313-322, 2020. doi: 10.1080/14670100.2020.1784569
68. | Improving access to automated external defibrillators in rural and remote settings: A drone delivery feasibility study. Journal Article. S. Cheskes, S. L. McLeod, M. Nolan, P. Snobelen, C. Vaillancourt, S. C. Brooks, K. N. Dainty, T. C. Y. Chan, I. R. Drennan. Journal of the American Heart Association, Vol. 9, Article No. e016687, 2020. doi: 10.1161/JAHA.120.016687
67. | Points gained in football: Using Markov process-based value functions to assess team performance. Journal Article, Forthcoming. T. C. Y. Chan, C. Fernandes, M. L. Puterman. Operations Research, Forthcoming (2020).
66. | Optimal in-hospital defibrillator placement. Journal Article. K. H. B. Leung, C. L. F. Sun, M. Yang, K. S. Allan, N. Wong, T. C. Y. Chan. Resuscitation, Vol. 151, pp. 91-98, 2020. doi: 10.1016/j.resuscitation.2020.03.018
Aims: To determine if mathematical optimization of in-hospital defibrillator placements can reduce in-hospital cardiac arrest-to-defibrillator distance compared to existing defibrillators in a single hospital. Methods: We identified treated IHCAs and defibrillator placements in St. Michael's Hospital in Toronto, Canada from Jan. 2013 to Jun. 2017 and mapped them to a 3-D computer model of the hospital. An optimization model identified an equal number of optimal defibrillator locations that minimized the average distance between IHCAs and the closest defibrillator using a 10-fold cross-validation approach. The optimized and existing defibrillator locations were compared in terms of average distance to the out-of-sample IHCAs. We repeated the analysis excluding intensive care units (ICUs), operating theatres (OTs), and the emergency department (ED). We also re-solved the model using fewer defibrillators to determine when the average distance matched the performance of existing defibrillators. Results: We identified 433 treated IHCAs and 53 defibrillators. Of these, 167 IHCAs and 31 defibrillators were outside of ICUs, OTs, and the ED. Optimal defibrillator placements reduced the average IHCA-to-defibrillator distance from 16.1 m to 2.7 m (relative decrease of 83.0%; P = 0.002) compared to existing defibrillator placements. For non-ICU/OT/ED IHCAs, the average distance was reduced from 24.4 m to 11.9 m (relative decrease of 51.3%; P = 0.002). 8‒9 optimized defibrillator locations were sufficient to match the average IHCA-to-defibrillator distance of existing defibrillator placements. Conclusions: Optimization-guided placement of in-hospital defibrillators can reduce the distance from an IHCA to the closest defibrillator. Equivalently, optimization can match existing defibrillator performance using far fewer defibrillators.
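Minimizing the average arrest-to-defibrillator distance over a fixed number of placements is structurally a p-median problem. A small generic sketch with PuLP follows (toy data and names are assumptions; this is not the authors' model or dataset):

```python
import pulp

def p_median(dist, p):
    """dist[i][j] is the distance from arrest location i to candidate site j;
    returns the p sites that minimize the total (hence average) distance."""
    n_arr, n_sites = len(dist), len(dist[0])
    prob = pulp.LpProblem("p_median", pulp.LpMinimize)
    y = pulp.LpVariable.dicts("open", range(n_sites), cat="Binary")
    x = pulp.LpVariable.dicts(
        "assign", [(i, j) for i in range(n_arr) for j in range(n_sites)], cat="Binary")
    prob += pulp.lpSum(dist[i][j] * x[(i, j)] for i in range(n_arr) for j in range(n_sites))
    for i in range(n_arr):
        prob += pulp.lpSum(x[(i, j)] for j in range(n_sites)) == 1   # assign each arrest
        for j in range(n_sites):
            prob += x[(i, j)] <= y[j]                                # only to open sites
    prob += pulp.lpSum(y[j] for j in range(n_sites)) == p            # place exactly p
    prob.solve()
    return [j for j in range(n_sites) if y[j].value() == 1]

# Toy example: 3 arrest locations, 4 candidate sites, 2 defibrillators to place
print(p_median([[5, 2, 9, 4], [7, 3, 1, 6], [2, 8, 5, 3]], p=2))
```

Cross-validation as in the study would amount to solving this on training arrests and evaluating distances on held-out arrests.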
65. | The importance of evaluating the complete automated knowledge-based planning pipeline. Journal Article. A. Babier, R. Mahmood, A. L. McNiven, A. Diamant, T. C. Y. Chan. Physica Medica, Vol. 72, pp. 73-79, 2020. doi: 10.1016/j.ejmp.2020.03.016
We determine how prediction methods combine with optimization methods in two-stage knowledge-based planning (KBP) pipelines to produce radiation therapy treatment plans. We trained two dose prediction methods, a generative adversarial network (GAN) and a random forest (RF), with the same 130 treatment plans. The models were applied to 87 out-of-sample patients to create two sets of predicted dose distributions that were used as input to two optimization models. The first optimization model, inverse planning (IP), estimates weights for dose-objectives from a predicted dose distribution and generates new plans using conventional inverse planning. The second optimization model, dose mimicking (DM), minimizes the sum of one-sided quadratic penalties between the predictions and the generated plans using several dose-objectives. Altogether, four KBP pipelines (GAN-IP, GAN-DM, RF-IP, and RF-DM) were constructed and benchmarked against the corresponding clinical plans using clinical criteria; the error of both prediction methods was also evaluated. The best performing plans were GAN-IP plans, which satisfied the same criteria as their corresponding clinical plans (78%) more often than any other KBP pipeline. However, GAN did not necessarily provide the best prediction for the second-stage optimization models. Specifically, both the RF-IP and RF-DM plans satisfied the same criteria as the clinical plans 25% and 15% more often than GAN-DM plans (the worst performing plans), respectively. GAN predictions also had a higher mean absolute error (3.9 Gy) than those from RF (3.6 Gy). We find that state-of-the-art prediction methods, when paired with different optimization algorithms, produce treatment plans with considerable variation in quality.
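The dose-mimicking stage referenced above minimizes one-sided quadratic penalties between the predicted and generated doses. A generic form of such an objective (illustrative notation, not the paper's exact parameterization) is

\[
\min_{d \in \mathcal{D}} \; \sum_{s \in \mathcal{S}} \sum_{v \in s} \big( \max\{0,\, d_v - \hat{d}_v\} \big)^2 \; + \; \sum_{t \in \mathcal{T}} \sum_{v \in t} \big( \max\{0,\, \hat{d}_v - d_v\} \big)^2,
\]

where \hat{d} is the predicted dose, \mathcal{D} is the set of deliverable dose distributions, \mathcal{S} indexes healthy structures penalized for exceeding the prediction, and \mathcal{T} indexes targets penalized for falling below it.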
64. | Improving bystander defibrillation in out-of-hospital cardiac arrests at home. Journal Article. L. Karlsson, C. M. Hansen, C. Vourakis, C. L. F. Sun, S. Rajan, K. B. Sondergaard, L. Andelius, F. Lippert, G. H. Gislason, T. C. Y. Chan, C. Torp-Pedersen, F. Folke. European Heart Journal: Acute Cardiovascular Care, Vol. 9(S4), pp. S74-S81, 2020. doi: 10.1177/2048872619891675
Aims: Most out-of-hospital cardiac arrests occur at home with dismal bystander defibrillation rates. We investigated automated external defibrillator coverage of home arrests, and the proportion potentially reachable with an automated external defibrillator before emergency medical service arrival according to different bystander activation strategies. Methods and results: Cardiac arrests in homes (private/nursing/senior homes) in Copenhagen, Denmark (2008–2016) and registered automated external defibrillators (2007–2016), were identified. Automated external defibrillator coverage (distance from arrest to automated external defibrillator) and accessibility at the time of arrest were examined according to route distance to nearest automated external defibrillator and emergency medical service response time. The proportion of arrests reachable with an automated external defibrillator by bystander was calculated using two-way (from patient to automated external defibrillator and back) and one-way (from automated external defibrillator to patient) potential activation strategies. Of 1879 home arrests, automated external defibrillator coverage ≤100 m was low (6.3%) and a two-way bystander could potentially only retrieve an accessible automated external defibrillator before emergency medical service in 31.1% (n=37) of cases. If a bystander only needed to travel one-way to bring an automated external defibrillator (≤100 m, ≤250 m and ≤500 m), 45.4% (n=54/119), 37.1% (n=196/529) and 29.8% (n=350/1174) could potentially be reached before the emergency medical service based on current automated external defibrillator accessibility. Conclusions: Few home arrests were reachable with an automated external defibrillator before emergency medical service if bystanders needed to travel from patient to automated external defibrillator and back. However, nearly one-third of arrests ≤500 m of an automated external defibrillator could be reached before emergency medical service arrival if the bystander only needed to travel one-way from the automated external defibrillator to the patient.
63. | Predicting post-operative cochlear implant performance using supervised machine learning. Journal Article. M. G. Crowson, P. Dixon, J. W. Lee, D. Shipp, T. Le, V. Lin, J. Chen, T. C. Y. Chan. Otology & Neurotology, Vol. 41, pp. e1013-e1023, 2020. doi: 10.1097/MAO.0000000000002710
62. | A physiology-based mathematical model for the selection of appropriate ventilator controls for lung and diaphragm protection. Journal Article, Forthcoming. B. Zhang, D. Ratano, L. J. Brochard, D. Georgopoulos, J. Duffin, M. Long, T. Schepens, I. Telias, A. S. Slutsky, E. C. Goligher, T. C. Y. Chan. Journal of Clinical Monitoring and Computing, Forthcoming (2020). doi: 10.1007/s10877-020-00479-x
61. | A drone delivery network for antiepileptic drugs: a framework and modeling case study in a lowest-income country. Journal Article. F. J. Mateen, K. H. B. Leung, A. C. Vogel, A. Fode Cissé, T. C. Y. Chan. Transactions of the Royal Society of Tropical Medicine and Hygiene, Vol. 114, pp. 308-314, 2020. doi: 10.1093/trstmh/trz131
Background: In urbanized, low-income cities with high rates of congestion, delivery of antiepileptic drugs (AEDs) by unmanned aerial vehicles (drones) to people with epilepsy for both emergency and non-urgent distribution may prove beneficial. Methods: Conakry is the capital of the Republic of Guinea, a low-income sub-Saharan African country (2018 per capita gross national income US$830). We computed the number of drones and delivery times to distribute AEDs from a main urban hospital to 27 pre-identified gas stations, mosques and pharmacies and compared these to the delivery times of a personal vehicle. Results: We predict that a single drone could serve all pre-identified delivery locations in Conakry within a 20.4-h period. In an emergency case of status epilepticus, 8, 20 and 24 of the 27 pre-identified destinations can be reached from the hub within 5, 10 and 15 min, respectively. Compared with the use of a personal vehicle, the response time for a drone is reduced by an average of 78.8% across all times of the day. Conclusions: Drones can dramatically reduce the response time for both emergency and routine delivery of lifesaving medicines. We discuss the advantages and disadvantages of such a drone delivery model with relevance to epilepsy. However, the commissioning of a trial of drones for drug delivery in related diseases and geographies is justified.
60. | Ambulance Emergency Response Optimization in developing countries. Journal Article. J. J. Boutilier, T. C. Y. Chan. Operations Research, Vol. 68, pp. 1315-1334, 2020. doi: 10.1287/opre.2019.1969
The lack of emergency medical transportation is viewed as the main barrier to the access and availability of emergency medical care in low- and middle-income countries (LMICs). In this paper, we present a robust optimization approach to optimize both the location and routing of emergency response vehicles, accounting for uncertainty in travel times and spatial demand characteristic of LMICs. We traveled to Dhaka, Bangladesh, the sixth largest and third most densely populated city in the world, to conduct field research resulting in the collection of two unique data sets that inform our approach. These data are leveraged to estimate demand for emergency medical services in an LMIC setting and to predict the travel time between any two locations in the road network for different times of day and days of the week. We combine our prediction-optimization framework with a simulation model and real data to provide an in-depth investigation into three policy-related questions. First, we demonstrate that outpost locations optimized for weekday rush hour lead to good performance for all times of day and days of the week. Second, we find that the performance of the current system could be replicated using one third of the current outpost locations and one half of the current number of ambulances. Finally, we show that a fleet of small ambulances has the potential to significantly outperform traditional ambulance vans. In particular, they are able to capture approximately three times more demand while reducing the median average response time by roughly 10%–18% over the entire week and 24%–35% during rush hour because of increased routing flexibility offered by more nimble vehicles on a larger road network. Our results provide practical insights for emergency response optimization that can be leveraged by hospital-based and private ambulance providers in Dhaka and other urban centers in developing countries.
59. | Knowledge-based automated planning with three-dimensional generative adversarial networks. Journal Article. A. Babier, R. Mahmood, A. L. McNiven, A. Diamant, T. C. Y. Chan. Medical Physics, Vol. 47, pp. 297-306, 2020. doi: 10.1002/mp.13896
Purpose: To develop a knowledge-based automated planning pipeline that generates treatment plans without feature engineering, using deep neural network architectures for predicting three-dimensional (3D) dose. Methods: Our knowledge-based automated planning (KBAP) pipeline consisted of a knowledge-based planning (KBP) method that predicts dose for a contoured computed tomography (CT) image followed by two optimization models that learn objective function weights and generate fluence-based plans, respectively. We developed a novel generative adversarial network (GAN)-based KBP approach, a 3D GAN model, which predicts dose for the full 3D CT image at once and accounts for correlations between adjacent CT slices. Baseline comparisons were made against two state-of-the-art deep learning–based KBP methods from the literature. We also developed an additional benchmark, a two-dimensional (2D) GAN model which predicts dose to each axial slice independently. For all models, we investigated the impact of multiplicatively scaling the predictions before optimization, such that the predicted dose distributions achieved all target clinical criteria. Each KBP model was trained on 130 previously delivered oropharyngeal treatment plans. Performance was tested on 87 out-of-sample previously delivered treatment plans. All KBAP plans were evaluated using clinical planning criteria and compared to their corresponding clinical plans. KBP prediction quality was assessed using dose-volume histogram (DVH) differences from the corresponding clinical plans. Results: The best performing KBAP plans were generated using predictions from the 3D GAN model that were multiplicatively scaled. These plans satisfied 77% of all clinical criteria, compared to the clinical plans, which satisfied 67% of all criteria. In general, multiplicatively scaling predictions prior to optimization increased the fraction of clinical criteria satisfaction by 11% relative to the plans generated with nonscaled predictions. Additionally, these KBAP plans satisfied the same criteria as the clinical plans 84% and 8% more frequently as compared to the two benchmark methods, respectively. Conclusions: We developed the first knowledge-based automated planning framework using a 3D generative adversarial network for prediction. Our results, based on 217 oropharyngeal cancer treatment plans, demonstrated superior performance in satisfying clinical criteria and generated more realistic plans as compared to the previous state-of-the-art approaches.
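For context on the GAN-based prediction model above: a conditional GAN for this kind of image-to-dose prediction is typically trained with an adversarial loss plus a reconstruction term (a generic pix2pix-style objective, not necessarily the exact loss used in the paper),

\[
\min_{G} \max_{D} \;\; \mathbb{E}_{(x,d)}\big[ \log D(x, d) \big] + \mathbb{E}_{x}\big[ \log\big(1 - D(x, G(x))\big) \big] + \lambda\, \mathbb{E}_{(x,d)}\big[ \lVert d - G(x) \rVert_1 \big],
\]

where x is the contoured CT image, d is the clinical dose distribution, G is the dose-prediction generator, D is the discriminator, and \lambda weights the reconstruction term. The 3D variant applies G and D to the full image volume rather than to individual axial slices.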
58. | Predicting plays in the National Football League. Journal Article. C. Fernandes, R. Yakubov, Y. Li, A. Prasad, T. C. Y. Chan. Journal of Sports Analytics, Vol. 6, pp. 35-43, 2020. doi: 10.3233/JSA-190348
This paper aims to develop an interpretable machine learning model to predict plays (pass versus rush) in the National Football League that will be useful for players and coaches in real time. Using data from the 2013–2014 to 2016–2017 NFL regular seasons, which included 1034 games and 130,344 pass/rush plays, we first develop and compare several machine learning models to determine the maximum possible prediction accuracy. The best performing model, a neural network, achieves a prediction accuracy of 75.3%, which is competitive with the state-of-the-art methods applied to other datasets. Then, we search over a family of simple decision tree models to identify one that captures 86% of the prediction accuracy of the neural network yet can be easily memorized and implemented in an actual game. We extend the analysis to building decision tree models tailored for each of the 32 NFL teams, obtaining accuracies ranging from 64.7% to 82.5%. Overall, our decision tree models can be a useful tool for coaches and players to improve their chances of stopping an offensive play.
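A shallow decision tree of the kind described in the entry above can be fit and printed in a few lines; this sketch uses assumed illustrative features and toy data, not the paper's feature set:

```python
from sklearn.tree import DecisionTreeClassifier, export_text

# Toy pre-snap features: down, yards to go, field position, score differential,
# seconds remaining; label: 1 = pass, 0 = rush.
X_train = [[1, 10, 25, 0, 1800], [3, 8, 60, -7, 420], [2, 2, 45, 3, 900], [3, 1, 50, 0, 120]]
y_train = [0, 1, 0, 1]

# Depth is capped so the resulting rules stay small enough to memorize in-game.
tree = DecisionTreeClassifier(max_depth=3, random_state=0)
tree.fit(X_train, y_train)
print(export_text(tree, feature_names=[
    "down", "yards_to_go", "field_position", "score_diff", "seconds_left"]))
```

Team-specific trees, as in the paper, would be fit separately on each team's plays.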
57. | Inverse optimization for the recovery of constraint parameters. Journal Article. T. C. Y. Chan, N. Kaw. European Journal of Operational Research, Vol. 282, pp. 415-427, 2020. doi: 10.1016/j.ejor.2019.09.027
Most inverse optimization models impute unspecified parameters of an objective function to make an observed solution optimal for a given optimization problem with a fixed feasible set. We propose two approaches to impute unspecified left-hand-side constraint coefficients in addition to a cost vector for a given linear optimization problem. The first approach identifies parameters minimizing the duality gap, while the second minimally perturbs prior estimates of the unspecified parameters to satisfy strong duality, if it is possible to satisfy the optimality conditions exactly. We apply these two approaches to the general linear optimization problem. We also use them to impute unspecified parameters of the uncertainty set for robust linear optimization problems under interval and cardinality constrained uncertainty. Each inverse optimization model we propose is nonconvex, but we show that a globally optimal solution can be obtained either in closed form or by solving a linear number of linear or convex optimization problems.
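Both approaches in the entry above revolve around the linear programming optimality conditions for the observed solution \hat{x}. For a forward problem min { c^T x : Ax >= b }, the solution \hat{x} is optimal exactly when there exists a dual vector y with

\[
A\hat{x} \ge b, \qquad A^{\top} y = c, \qquad y \ge 0, \qquad c^{\top}\hat{x} = b^{\top} y;
\]

the first approach chooses the unknown constraint coefficients (and cost vector) to make the duality gap c^{\top}\hat{x} - b^{\top} y as small as possible, while the second perturbs prior estimates of those parameters as little as possible so that all four conditions hold. (This is a paraphrase of the abstract in standard LP notation, not the paper's exact formulation.)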
2019 |
|
56. | In silico trial of optimized versus real public defibrillator locations Journal Article C. L. F. Sun, L. Karlsson, C. Torp-Pedersen, L. J. Morrison, S. C. Brooks, F. Folke, T. C. Y. Chan Journal of the American College of Cardiology, Vol. 74, pp. 1557-1567, 2019. @article{ChanTCY.J056, title = {In silico trial of optimized versus real public defibrillator locations}, author = {C. L. F. Sun and L. Karlsson and C. Torp-Pedersen and L. J. Morrison and S. C. Brooks and F. Folke and T. C. Y. Chan}, doi = {10.1016/j.jacc.2019.06.075}, year = {2019}, date = {2019-06-16}, journal = {Journal of the American College of Cardiology}, volume = {74}, pages = {1557-1567}, abstract = {Background: Automated external defibrillators (AEDs) are often placed in areas of low risk and limited temporal availability. Mathematical optimization can improve AED accessibility but has not been compared with current practices. Objectives: This study sought to determine whether, compared with real AED locations, optimized AED locations improve coverage of out-of-hospital cardiac arrests (OHCAs). Methods: The authors conducted the first retrospective in silico trial of an AED placement intervention. This study identified all public OHCAs of presumed cardiac cause and real AED deployed (control group) from 2007 to 2016 in Copenhagen, Denmark. Optimization models trained on historical OHCAs (1994 to 2007) were used to optimize an equal number of AEDs to the control group in locations with availabilities based on building hours (intervention #1) or 24/7 access (intervention #2). The 2 interventions and control scenario were compared using the number of OHCAs that occurred within 100 m of an accessible AED ("OHCA coverage") during the 2007 to 2016 period. Change in bystander defibrillation and 30-day survival were estimated using multivariate logistic regression. Results: There were 673 public OHCAs and 1,573 registered AEDs from 2007 to 2016. OHCA coverage of real AED placements was 22.0%. OHCA coverage of interventions #1 and #2 was significantly higher at 33.4% and 43.1%, respectively; relative gains of 52.0% to 95.9% (p < 0.001). Bystander defibrillation increased from 14.6% (control group) to 22.5% to 26.9% (intervention #1 to intervention #2); relative increase of 52.9% to 83.5% (p < 0.001). The 30-day survival rates increased from 31.3% (control group) to 34.7% to 35.4%, which is a relative increase of 11.0% to 13.3% (p < 0.001). Conclusions: Optimized AED placements increased OHCA coverage by approximately 50% to 100% over real AED placements, leading to significant predicted increases in bystander defibrillation and 30-day survival.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Automated external defibrillators (AEDs) are often placed in areas of low risk and limited temporal availability. Mathematical optimization can improve AED accessibility but has not been compared with current practices. Objectives: This study sought to determine whether, compared with real AED locations, optimized AED locations improve coverage of out-of-hospital cardiac arrests (OHCAs). Methods: The authors conducted the first retrospective in silico trial of an AED placement intervention. This study identified all public OHCAs of presumed cardiac cause and real AED deployed (control group) from 2007 to 2016 in Copenhagen, Denmark. 
Optimization models trained on historical OHCAs (1994 to 2007) were used to optimize an equal number of AEDs to the control group in locations with availabilities based on building hours (intervention #1) or 24/7 access (intervention #2). The 2 interventions and control scenario were compared using the number of OHCAs that occurred within 100 m of an accessible AED ("OHCA coverage") during the 2007 to 2016 period. Change in bystander defibrillation and 30-day survival were estimated using multivariate logistic regression. Results: There were 673 public OHCAs and 1,573 registered AEDs from 2007 to 2016. OHCA coverage of real AED placements was 22.0%. OHCA coverage of interventions #1 and #2 was significantly higher at 33.4% and 43.1%, respectively; relative gains of 52.0% to 95.9% (p < 0.001). Bystander defibrillation increased from 14.6% (control group) to 22.5% to 26.9% (intervention #1 to intervention #2); relative increase of 52.9% to 83.5% (p < 0.001). The 30-day survival rates increased from 31.3% (control group) to 34.7% to 35.4%, which is a relative increase of 11.0% to 13.3% (p < 0.001). Conclusions: Optimized AED placements increased OHCA coverage by approximately 50% to 100% over real AED placements, leading to significant predicted increases in bystander defibrillation and 30-day survival. |
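Illustrative sketch (not the study's model): at its core, AED siting of this kind is a coverage optimization. The toy maximal-coverage program below, written with the PuLP library, picks K candidate sites to cover as many historical arrest locations as possible; the spatiotemporal availability modelling that distinguishes the paper is deliberately omitted, and all data are hypothetical.

```python
# Minimal sketch: a textbook maximal-coverage formulation for AED siting.
# cover[i][j] = 1 if candidate site j covers OHCA i (e.g., within 100 m).
import pulp

n_ohca, n_sites, K = 6, 4, 2
cover = [
    [1, 0, 0, 1],
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 0, 0, 1],
]

prob = pulp.LpProblem("aed_placement", pulp.LpMaximize)
x = [pulp.LpVariable(f"site_{j}", cat="Binary") for j in range(n_sites)]    # place AED at site j
z = [pulp.LpVariable(f"covered_{i}", cat="Binary") for i in range(n_ohca)]  # OHCA i covered

prob += pulp.lpSum(z)                                   # maximize covered OHCAs
prob += pulp.lpSum(x) <= K                              # budget on number of AEDs
for i in range(n_ohca):
    prob += z[i] <= pulp.lpSum(cover[i][j] * x[j] for j in range(n_sites))

prob.solve(pulp.PULP_CBC_CMD(msg=False))
print("sites chosen:", [j for j in range(n_sites) if x[j].value() == 1])
print("OHCAs covered:", int(pulp.value(prob.objective)))
```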
55. | Machine learning & cochlear implantation – A structured review of opportunities and challenges Journal Article M. G. Crowson, V. Lin, J. M. Chen, T. C. Y. Chan Otology & Neurotology, Vol. 41, pp. e36-e45, 2019. @article{ChanTCY.J055, title = {Machine learning & cochlear implantation – A structured review of opportunities and challenges}, author = {M. G. Crowson and V. Lin and J. M. Chen and T. C. Y. Chan}, doi = {10.1097/MAO.0000000000002440}, year = {2019}, date = {2019-05-01}, journal = {Otology & Neurotology}, volume = {41}, pages = {e36-e45}, abstract = {Objective: The use of machine learning technology to automate intellectual processes and boost clinical process efficiency in medicine has exploded in the past 5 years. Machine learning excels in automating pattern recognition and in adapting learned representations to new settings. Moreover, machine learning techniques have the advantage of incorporating complexity and are free from many of the limitations of traditional deterministic approaches. Cochlear implants (CI) are a unique fit for machine learning techniques given the need for optimization of signal processing to fit complex environmental scenarios and individual patients' CI MAPping. However, there are many other opportunities where machine learning may assist in CI beyond signal processing. The objective of this review was to synthesize past applications of machine learning technologies for pediatric and adult CI and describe novel opportunities for research and development. Data sources: The PubMed/MEDLINE, EMBASE, Scopus, and ISI Web of Knowledge databases were mined using a directed search strategy to identify the nexus between CI and artificial intelligence/machine learning literature. Study selection: Non-English language articles, articles without an available abstract or full-text, and nonrelevant articles were manually appraised and excluded. Included articles were evaluated for specific machine learning methodologies, content, and application success. Data synthesis: The database search identified 298 articles. Two hundred fifty-nine articles (86.9%) were excluded based on the available abstract/full-text, language, and relevance. The remaining 39 articles were included in the review analysis. There was a marked increase in year-over-year publications from 2013 to 2018. Applications of machine learning technologies involved speech/signal processing optimization (17; 43.6% of articles), automated evoked potential measurement (6; 15.4%), postoperative performance/efficacy prediction (5; 12.8%), and surgical anatomy location prediction (3; 7.7%), and 2 (5.1%) in each of robotics, electrode placement performance, and biomaterials performance. Conclusion: The relationship between CI and artificial intelligence is strengthening with a recent increase in publications reporting successful applications. Considerable effort has been directed toward augmenting signal processing and automating postoperative MAPping using machine learning algorithms. Other promising applications include augmenting CI surgery mechanics and personalized medicine approaches for boosting CI patient performance. 
Future opportunities include addressing scalability and the research and clinical communities' acceptance of machine learning algorithms as effective techniques.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objective: The use of machine learning technology to automate intellectual processes and boost clinical process efficiency in medicine has exploded in the past 5 years. Machine learning excels in automating pattern recognition and in adapting learned representations to new settings. Moreover, machine learning techniques have the advantage of incorporating complexity and are free from many of the limitations of traditional deterministic approaches. Cochlear implants (CI) are a unique fit for machine learning techniques given the need for optimization of signal processing to fit complex environmental scenarios and individual patients' CI MAPping. However, there are many other opportunities where machine learning may assist in CI beyond signal processing. The objective of this review was to synthesize past applications of machine learning technologies for pediatric and adult CI and describe novel opportunities for research and development. Data sources: The PubMed/MEDLINE, EMBASE, Scopus, and ISI Web of Knowledge databases were mined using a directed search strategy to identify the nexus between CI and artificial intelligence/machine learning literature. Study selection: Non-English language articles, articles without an available abstract or full-text, and nonrelevant articles were manually appraised and excluded. Included articles were evaluated for specific machine learning methodologies, content, and application success. Data synthesis: The database search identified 298 articles. Two hundred fifty-nine articles (86.9%) were excluded based on the available abstract/full-text, language, and relevance. The remaining 39 articles were included in the review analysis. There was a marked increase in year-over-year publications from 2013 to 2018. Applications of machine learning technologies involved speech/signal processing optimization (17; 43.6% of articles), automated evoked potential measurement (6; 15.4%), postoperative performance/efficacy prediction (5; 12.8%), and surgical anatomy location prediction (3; 7.7%), and 2 (5.1%) in each of robotics, electrode placement performance, and biomaterials performance. Conclusion: The relationship between CI and artificial intelligence is strengthening with a recent increase in publications reporting successful applications. Considerable effort has been directed toward augmenting signal processing and automating postoperative MAPping using machine learning algorithms. Other promising applications include augmenting CI surgery mechanics and personalized medicine approaches for boosting CI patient performance. Future opportunities include addressing scalability and the research and clinical communities' acceptance of machine learning algorithms as effective techniques. |
54. | Health care utilization prior to out-of-hospital cardiac arrest: a population-based study Journal Article M. Shuvy, M. Koh, F. Qiu, S. C. Brooks, T. C. Y. Chan, S. Cheskes, P. Dorian, G. Geri, S. Lin, D. C. Scales, D. T. Ko Resuscitation, Vol. 141, pp. 158-165, 2019. @article{ChanTCY.J054, title = {Health care utilization prior to out-of-hospital cardiac arrest: a population-based study}, author = {M. Shuvy and M. Koh and F. Qiu and S. C. Brooks and T. C. Y. Chan and S. Cheskes and P. Dorian and G. Geri and S. Lin and D. C. Scales and D. T. Ko}, doi = {10.1016/j.resuscitation.2019.04.033}, year = {2019}, date = {2019-04-18}, journal = {Resuscitation}, volume = {141}, pages = {158-165}, abstract = {Introduction: Although out-of-hospital cardiac arrest (OHCA) is thought of as a sudden event, recent studies suggest that many patients have symptoms or have sought medical attention prior to their arrest. Our objective was to evaluate patterns of healthcare utilization before OHCA. Methods: We conducted a population-based cohort study in Ontario, Canada, which included all patients ≥20 years, who suffered out-of-hospital cardiac arrest and transferred to an emergency department (ED) from 2007 to 2018. Measurements included emergency room assessments, hospitalizations and physician visits prior to arrest. Results: The cohort comprised 38,906 patients, their mean age was 66.5 years, and 32.7% were women. Rates of ED assessments and hospital admissions were relatively constant until 90 days prior to arrest where they markedly increased to the time before arrest. Within 90 days, rates of ED assessment, hospitalization, and primary care physician visit were 29.5%, 16.4%, and 70.1%, respectively. Cardiovascular conditions were diagnosed in 14.4% of ED visits, and 33.7% of hospitalizations in this time period. The largest age-difference was the mental and behavioural disorders within 90 days of OHCA in the ED, where rates were 12.2% among patients <65 years vs. 1.9% for patients ≥65 years. Conclusions: In contrast to the conventional wisdom that OHCA occurs without prior contacts to the health care system, we found that more than 1 in 4 patients were assessed in the ED prior within 90 days of their arrest. Identification of warning signs of OHCA may allow future development of prevention strategies.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Introduction: Although out-of-hospital cardiac arrest (OHCA) is thought of as a sudden event, recent studies suggest that many patients have symptoms or have sought medical attention prior to their arrest. Our objective was to evaluate patterns of healthcare utilization before OHCA. Methods: We conducted a population-based cohort study in Ontario, Canada, which included all patients ≥20 years, who suffered out-of-hospital cardiac arrest and transferred to an emergency department (ED) from 2007 to 2018. Measurements included emergency room assessments, hospitalizations and physician visits prior to arrest. Results: The cohort comprised 38,906 patients, their mean age was 66.5 years, and 32.7% were women. Rates of ED assessments and hospital admissions were relatively constant until 90 days prior to arrest where they markedly increased to the time before arrest. Within 90 days, rates of ED assessment, hospitalization, and primary care physician visit were 29.5%, 16.4%, and 70.1%, respectively. Cardiovascular conditions were diagnosed in 14.4% of ED visits, and 33.7% of hospitalizations in this time period. 
The largest age-difference was the mental and behavioural disorders within 90 days of OHCA in the ED, where rates were 12.2% among patients <65 years vs. 1.9% for patients ≥65 years. Conclusions: In contrast to the conventional wisdom that OHCA occurs without prior contacts to the health care system, we found that more than 1 in 4 patients were assessed in the ED within 90 days prior to their arrest. Identification of warning signs of OHCA may allow future development of prevention strategies. |
53. | A mathematical optimization framework for expansion draft decision making and analysis Journal Article K. E. C. Booth, T. C. Y. Chan, Y. Shalaby Journal of Quantitative Analysis in Sports, Vol. 15, pp. 27-40, 2019. @article{ChanTCY.J053, title = {A mathematical optimization framework for expansion draft decision making and analysis}, author = {K. E. C. Booth and T. C. Y. Chan and Y. Shalaby}, doi = {10.1515/jqas-2018-0024}, year = {2019}, date = {2019-02-03}, journal = {Journal of Quantitative Analysis in Sports}, volume = {15}, pages = {27-40}, abstract = {In this paper, we present and analyze a mathematical programming approach to expansion draft optimization in the context of the 2017 NHL expansion draft involving the Vegas Golden Knights, noting that this approach can be generalized to future NHL expansions and to those in other sports leagues. In particular, we present a novel mathematical optimization approach, consisting of two models, to optimize expansion draft protection and selection decisions made by the various teams. We use this approach to investigate a number of expansion draft scenarios, including the impact of 'collaboration' between existing teams, the trade-off between team performance and salary cap flexibility, as well as opportunities for Vegas to take advantage of side agreements in a 'leverage' experiment. Finally, we compare the output of our approach to what actually happened in the expansion draft, noting both similarities and discrepancies between our solutions and the actual outcomes. Overall, we believe our framework serves as a promising foundation for future expansion draft research and decision-making in hockey and in other sports.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this paper, we present and analyze a mathematical programming approach to expansion draft optimization in the context of the 2017 NHL expansion draft involving the Vegas Golden Knights, noting that this approach can be generalized to future NHL expansions and to those in other sports leagues. In particular, we present a novel mathematical optimization approach, consisting of two models, to optimize expansion draft protection and selection decisions made by the various teams. We use this approach to investigate a number of expansion draft scenarios, including the impact of 'collaboration' between existing teams, the trade-off between team performance and salary cap flexibility, as well as opportunities for Vegas to take advantage of side agreements in a 'leverage' experiment. Finally, we compare the output of our approach to what actually happened in the expansion draft, noting both similarities and discrepancies between our solutions and the actual outcomes. Overall, we believe our framework serves as a promising foundation for future expansion draft research and decision-making in hockey and in other sports. |
52. | Process flexibility in baseball: The value of positional flexibility Journal Article T. C. Y. Chan, D. Fearing Management Science, Vol. 65, pp. 1642-1666, 2019. @article{ChanTCY.J052, title = {Process flexibility in baseball: The value of positional flexibility}, author = {T. C. Y. Chan and D. Fearing}, doi = {10.1287/mnsc.2017.3004}, year = {2019}, date = {2019-02-02}, journal = {Management Science}, volume = {65}, pages = {1642-1666}, abstract = {This paper introduces the formal study of process flexibility to the novel domain of sports analytics. In baseball, positional flexibility is the analogous concept to process flexibility from manufacturing. We study the flexibility of players (plants) on a baseball team who produce innings-played at different positions (products). We develop models and metrics to evaluate expected and worst-case performance under injury risk (capacity uncertainty) with continuous player-position capabilities. Using Major League Baseball data, we quantify the impact of flexibility on team and individual performance and explore the player chains that arise when injuries occur. We discover that top teams can attribute at least one to two wins per season to flexibility alone, generally as a result of long subchains in the infield or outfield. The least robust teams to worst-case injury, those whose performance is driven by one or two star players, are over four times as fragile as the most robust teams. We evaluate several aspects of individual flexibility, such as how much value individual players bring to their team in terms of average and worst-case performance. Finally, we demonstrate the generalizability of our framework for player evaluation by quantifying the value of potential free agent additions and uncovering the true 'MVP' of a team.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper introduces the formal study of process flexibility to the novel domain of sports analytics. In baseball, positional flexibility is the analogous concept to process flexibility from manufacturing. We study the flexibility of players (plants) on a baseball team who produce innings-played at different positions (products). We develop models and metrics to evaluate expected and worst-case performance under injury risk (capacity uncertainty) with continuous player-position capabilities. Using Major League Baseball data, we quantify the impact of flexibility on team and individual performance and explore the player chains that arise when injuries occur. We discover that top teams can attribute at least one to two wins per season to flexibility alone, generally as a result of long subchains in the infield or outfield. The least robust teams to worst-case injury, those whose performance is driven by one or two star players, are over four times as fragile as the most robust teams. We evaluate several aspects of individual flexibility, such as how much value individual players bring to their team in terms of average and worst-case performance. Finally, we demonstrate the generalizability of our framework for player evaluation by quantifying the value of potential free agent additions and uncovering the true 'MVP' of a team. |
51. | Inverse optimization: Closed-form solutions, geometry, and goodness of fit Journal Article T. C. Y. Chan, T. Lee, D. Terekhov Management Science, Vol. 65, pp. 1115-1135, 2019. @article{ChanTCY.J051, title = {Inverse optimization: Closed-form solutions, geometry, and goodness of fit}, author = {T. C. Y. Chan and T. Lee and D. Terekhov}, doi = {10.1287/mnsc.2017.2992}, year = {2019}, date = {2019-02-01}, journal = {Management Science}, volume = {65}, pages = {1115-1135}, abstract = {In classical inverse linear optimization, one assumes that a given solution is a candidate to be optimal. Real data are imperfect and noisy, so there is no guarantee that this assumption is satisfied. Inspired by regression, this paper presents a unified framework for cost function estimation in linear optimization comprising a general inverse optimization model and a corresponding goodness-of-fit metric. Although our inverse optimization model is nonconvex, we derive a closed-form solution and present the geometric intuition. Our goodness-of-fit metric, ρ, the coefficient of complementarity, has similar properties to R^2 from regression and is quasi-convex in the input data, leading to an intuitive geometric interpretation. While ρ is computable in polynomial time, we derive a lower bound that possesses the same properties, is tight for several important model variations, and is even easier to compute. We demonstrate the application of our framework for model estimation and evaluation in production planning and cancer therapy.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In classical inverse linear optimization, one assumes that a given solution is a candidate to be optimal. Real data are imperfect and noisy, so there is no guarantee that this assumption is satisfied. Inspired by regression, this paper presents a unified framework for cost function estimation in linear optimization comprising a general inverse optimization model and a corresponding goodness-of-fit metric. Although our inverse optimization model is nonconvex, we derive a closed-form solution and present the geometric intuition. Our goodness-of-fit metric, ρ, the coefficient of complementarity, has similar properties to R^2 from regression and is quasi-convex in the input data, leading to an intuitive geometric interpretation. While ρ is computable in polynomial time, we derive a lower bound that possesses the same properties, is tight for several important model variations, and is even easier to compute. We demonstrate the application of our framework for model estimation and evaluation in production planning and cancer therapy. |
50. | A contemporary review of machine learning in otolaryngology-head & neck surgery Journal Article M. G. Crowson, J. Ranisau, A. Eskander, A. Babier, B. Xu, R. R. Kahmke, J. M. Chen, T. C. Y. Chan Laryngoscope, Vol. 130, pp. 45-51, 2019. @article{ChanTCY.J050, title = {A contemporary review of machine learning in otolaryngology-head & neck surgery}, author = {M. G. Crowson and J. Ranisau and A. Eskander and A. Babier and B. Xu and R. R. Kahmke and J. M. Chen and T. C. Y. Chan}, doi = {10.1002/lary.27850}, year = {2019}, date = {2019-01-11}, journal = {Laryngoscope}, volume = {130}, pages = {45-51}, abstract = {One of the key challenges with big data is leveraging the complex network of information to yield useful clinical insights. The confluence of massive amounts of health data and a desire to make inferences and insights on these data has produced a substantial amount of interest in machine-learning analytic methods. There has been a drastic increase in the otolaryngology literature volume describing novel applications of machine learning within the past 5 years. In this timely contemporary review, we provide an overview of popular machine-learning techniques, and review recent machine-learning applications in otolaryngology–head and neck surgery including neurotology, head and neck oncology, laryngology, and rhinology. Investigators have realized significant success in validated models with model sensitivities and specificities approaching 100%. Challenges remain in the implementation of machine-learning algorithms. This may be in part the unfamiliarity of these techniques to clinician leaders on the front lines of patient care. Spreading awareness and confidence in machine learning will follow with further validation and proof-of-value analyses that demonstrate model performance superiority over established methods. We are poised to see a greater influx of machine-learning applications to clinical problems in otolaryngology–head and neck surgery, and it is prudent for providers to understand the potential benefits and limitations of these technologies.}, keywords = {}, pubstate = {published}, tppubtype = {article} } One of the key challenges with big data is leveraging the complex network of information to yield useful clinical insights. The confluence of massive amounts of health data and a desire to make inferences and insights on these data has produced a substantial amount of interest in machine-learning analytic methods. There has been a drastic increase in the otolaryngology literature volume describing novel applications of machine learning within the past 5 years. In this timely contemporary review, we provide an overview of popular machine-learning techniques, and review recent machine-learning applications in otolaryngology–head and neck surgery including neurotology, head and neck oncology, laryngology, and rhinology. Investigators have realized significant success in validated models with model sensitivities and specificities approaching 100%. Challenges remain in the implementation of machine-learning algorithms. This may be in part the unfamiliarity of these techniques to clinician leaders on the front lines of patient care. Spreading awareness and confidence in machine learning will follow with further validation and proof-of-value analyses that demonstrate model performance superiority over established methods. 
We are poised to see a greater influx of machine-learning applications to clinical problems in otolaryngology–head and neck surgery, and it is prudent for providers to understand the potential benefits and limitations of these technologies. |
2018 |
|
49. | Robust radiotherapy planning Journal Article J. Unkelbach, M. Alber, M. Bangert, R. Bokrantz, T. C. Y. Chan, J. Deasy, A. Fredriksson, B. L. Gorissen, M. van Herk, W. Liu, H. Mahmoudzadeh, O. Nohadani, J. V. Siebers, M. Witte, H. Xu Physics in Medicine and Biology, Vol. 63(Article No. 22TR02), 2018. @article{ChanTCY.J049, title = {Robust radiotherapy planning}, author = {J. Unkelbach and M. Alber and M. Bangert and R. Bokrantz and T. C. Y. Chan and J. Deasy and A. Fredriksson and B. L. Gorissen and M. van Herk and W. Liu and H. Mahmoudzadeh and O. Nohadani and J. V. Siebers and M. Witte and H. Xu}, doi = {10.1088/1361-6560/aae659}, year = {2018}, date = {2018-10-05}, journal = {Physics in Medicine and Biology}, volume = {63}, number = {Article No. 22TR02}, abstract = {Motion and uncertainty in radiotherapy is traditionally handled via margins. The clinical target volume (CTV) is expanded to a larger planning target volume (PTV), which is irradiated to the prescribed dose. However, the PTV concept has several limitations, especially in proton therapy. Therefore, robust and probabilistic optimization methods have been developed that directly incorporate motion and uncertainty into treatment plan optimization for intensity modulated radiotherapy (IMRT) and intensity modulated proton therapy (IMPT). Thereby, the explicit definition of a PTV becomes obsolete and treatment plan optimization is directly based on the CTV. Initial work focused on random and systematic setup errors in IMRT. Later, inter-fraction prostate motion and intra-fraction lung motion became a research focus. Over the past ten years, IMPT has emerged as a new application for robust planning methods. In proton therapy, range or setup errors may lead to dose degradation and misalignment of dose contributions from different beams – a problem that cannot generally be addressed by margins. Therefore, IMPT has led to the first implementations of robust planning methods in commercial planning systems, making these methods available for clinical use. This paper first summarizes the limitations of the PTV concept. Subsequently, robust optimization methods are introduced and their applications in IMRT and IMPT planning are reviewed.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Motion and uncertainty in radiotherapy is traditionally handled via margins. The clinical target volume (CTV) is expanded to a larger planning target volume (PTV), which is irradiated to the prescribed dose. However, the PTV concept has several limitations, especially in proton therapy. Therefore, robust and probabilistic optimization methods have been developed that directly incorporate motion and uncertainty into treatment plan optimization for intensity modulated radiotherapy (IMRT) and intensity modulated proton therapy (IMPT). Thereby, the explicit definition of a PTV becomes obsolete and treatment plan optimization is directly based on the CTV. Initial work focused on random and systematic setup errors in IMRT. Later, inter-fraction prostate motion and intra-fraction lung motion became a research focus. Over the past ten years, IMPT has emerged as a new application for robust planning methods. In proton therapy, range or setup errors may lead to dose degradation and misalignment of dose contributions from different beams – a problem that cannot generally be addressed by margins. 
Therefore, IMPT has led to the first implementations of robust planning methods in commercial planning systems, making these methods available for clinical use. This paper first summarizes the limitations of the PTV concept. Subsequently, robust optimization methods are introduced and their applications in IMRT and IMPT planning are reviewed. |
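As a schematic for readers, robust planning replaces a nominal objective with a worst case over an uncertainty set; the template below uses generic notation (w for beamlet weights, D(u) for the dose-influence matrix under scenario u, such as a setup or range error) and is not any specific formulation from the review.

```latex
% Generic robust counterpart of a fluence-map optimization (notation is illustrative)
\begin{align*}
\text{nominal:} \quad & \min_{w \ge 0} \; f\big(D(\hat{u})\, w\big)\\
\text{robust:}  \quad & \min_{w \ge 0} \; \max_{u \in \mathcal{U}} \; f\big(D(u)\, w\big)
\end{align*}
```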
48. | Applications of machine learning algorithms to predict therapeutic outcomes in depression: A meta-analysis and systematic review Journal Article Y. Lee, R.-M. Ragguett, R. B. Mansur, J. J. Boutilier, J. D. Rosenblat, A. Trevizol, E. Brietzke, K. Lin, Z. Pan, M. Subramaniapillai, T. C. Y. Chan, D. Fus, C. Park, N. Musial, H. Zuckerman, V. C.-H. Chen, R. Ho, C. Rong, R. S. McIntyre Journal of Affective Disorders, Vol. 241, pp. 519-532, 2018. @article{ChanTCY.J048, title = {Applications of machine learning algorithms to predict therapeutic outcomes in depression: A meta-analysis and systematic review}, author = {Y. Lee and R.-M. Ragguett and R. B. Mansur and J. J. Boutilier and J. D. Rosenblat and A. Trevizol and E. Brietzke and K. Lin and Z. Pan and M. Subramaniapillai and T. C. Y. Chan and D. Fus and C. Park and N. Musial and H. Zuckerman and V. C.-H. Chen and R. Ho and C. Rong and R. S. McIntyre}, doi = {10.1016/j.jad.2018.08.073}, year = {2018}, date = {2018-08-12}, journal = {Journal of Affective Disorders}, volume = {241}, pages = {519-532}, abstract = {Background: No previous study has comprehensively reviewed the application of machine learning algorithms in mood disorders populations. Herein, we qualitatively and quantitatively evaluate previous studies of machine learning-devised models that predict therapeutic outcomes in mood disorders populations. Methods: We searched Ovid MEDLINE/PubMed from inception to February 8, 2018 for relevant studies that included adults with bipolar or unipolar depression; assessed therapeutic outcomes with a pharmacological, neuromodulatory, or manual-based psychotherapeutic intervention for depression; applied a machine learning algorithm; and reported predictors of therapeutic response. A random-effects meta-analysis of proportions and meta-regression analyses were conducted. Results: We identified 639 records: 75 full-text publications were assessed for eligibility; 26 studies (n = 17, 499) and 20 studies (n = 6325) were included in qualitative and quantitative review, respectively. Classification algorithms were able to predict therapeutic outcomes with an overall accuracy of 0.82 (95% confidence interval [CI] of [0.77, 0.87]). Pooled estimates of classification accuracy were significantly greater (p < 0.01) in models informed by multiple data types (e.g., composite of phenomenological patient features and neuroimaging or peripheral gene expression data; pooled proportion [95% CI] = 0.93[0.86, 0.97]) when compared to models with lower-dimension data types (pooled proportion = 0.68[0.62, 0.74] to 0.85[0.81, 0.88]). Limitations: Most studies were retrospective; differences in machine learning algorithms and their implementation (e.g., cross-validation, hyperparameter tuning); cannot infer importance of individual variables fed into learning algorithm. Conclusions: Machine learning algorithms provide a powerful conceptual and analytic framework capable of integrating multiple data types and sources. An integrative approach may more effectively model neurobiological components as functional modules of pathophysiology embedded within the complex, social dynamics that influence the phenomenology of mental disorders.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: No previous study has comprehensively reviewed the application of machine learning algorithms in mood disorders populations. 
Herein, we qualitatively and quantitatively evaluate previous studies of machine learning-devised models that predict therapeutic outcomes in mood disorders populations. Methods: We searched Ovid MEDLINE/PubMed from inception to February 8, 2018 for relevant studies that included adults with bipolar or unipolar depression; assessed therapeutic outcomes with a pharmacological, neuromodulatory, or manual-based psychotherapeutic intervention for depression; applied a machine learning algorithm; and reported predictors of therapeutic response. A random-effects meta-analysis of proportions and meta-regression analyses were conducted. Results: We identified 639 records: 75 full-text publications were assessed for eligibility; 26 studies (n = 17,499) and 20 studies (n = 6325) were included in qualitative and quantitative review, respectively. Classification algorithms were able to predict therapeutic outcomes with an overall accuracy of 0.82 (95% confidence interval [CI] of [0.77, 0.87]). Pooled estimates of classification accuracy were significantly greater (p < 0.01) in models informed by multiple data types (e.g., composite of phenomenological patient features and neuroimaging or peripheral gene expression data; pooled proportion [95% CI] = 0.93[0.86, 0.97]) when compared to models with lower-dimension data types (pooled proportion = 0.68[0.62, 0.74] to 0.85[0.81, 0.88]). Limitations: Most studies were retrospective; differences in machine learning algorithms and their implementation (e.g., cross-validation, hyperparameter tuning); cannot infer importance of individual variables fed into learning algorithm. Conclusions: Machine learning algorithms provide a powerful conceptual and analytic framework capable of integrating multiple data types and sources. An integrative approach may more effectively model neurobiological components as functional modules of pathophysiology embedded within the complex, social dynamics that influence the phenomenology of mental disorders. |
47. | Spatiotemporal AED optimization is generalizable Journal Article C. L. F. Sun, L. Karlsson, C. Torp-Pedersen, L. J. Morrison, F. Folke, T. C. Y. Chan Resuscitation, Vol. 131, pp. 101-107, 2018. @article{ChanTCY.J047, title = {Spatiotemporal AED optimization is generalizable}, author = {C. L. F. Sun and L. Karlsson and C. Torp-Pedersen and L. J. Morrison and F. Folke and T. C. Y. Chan}, doi = {10.1016/j.resuscitation.2018.08.012}, year = {2018}, date = {2018-08-08}, journal = {Resuscitation}, volume = {131}, pages = {101-107}, abstract = {Aims: Mathematical optimization of automated external defibrillator (AED) placements has the potential to improve out-of-hospital cardiac arrest (OHCA) coverage and reverse the negative effects of limited AED accessibility. However, the generalizability of optimization approaches has not yet been investigated. Our goal is to examine the performance and generalizability of a spatiotemporal AED placement optimization methodology, initially developed for Toronto, Canada, to the new study setting of Copenhagen, Denmark. Methods: We identified all public OHCAs (1994–2016) and all registered AEDs (2016) in Copenhagen, Denmark. We calculated the coverage loss associated with limited temporal accessibility of registered AEDs, and used a spatiotemporal optimization model to quantify the potential coverage gain of optimized AED deployment. Coverage gain of spatiotemporal deployment over a spatial-only solution was quantified through 10-fold cross-validation. Statistical testing was performed using χ^2 and McNemar's tests. Results: We found 2149 public OHCAs and 1573 registered AED locations. Coverage loss was found to be 24.4% (1104 OHCAs covered under assumed 24/7 coverage, and 835 OHCAs under actual coverage). The coverage gain from using the spatiotemporal model over a spatial-only approach was 15.3%. Temporal and geographical trends in coverage gain were similar to Toronto. Conclusions: Without modification, a previously developed spatiotemporal AED optimization approach was applied to Copenhagen, resulting in similar OHCA coverage findings as Toronto, despite large geographic and cultural differences between the two cities. In addition to reinforcing the importance of temporal accessibility of AEDs, these similarities demonstrate the generalizability of optimization approaches to improve AED placement and accessibility.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Aims: Mathematical optimization of automated external defibrillator (AED) placements has the potential to improve out-of-hospital cardiac arrest (OHCA) coverage and reverse the negative effects of limited AED accessibility. However, the generalizability of optimization approaches has not yet been investigated. Our goal is to examine the performance and generalizability of a spatiotemporal AED placement optimization methodology, initially developed for Toronto, Canada, to the new study setting of Copenhagen, Denmark. Methods: We identified all public OHCAs (1994–2016) and all registered AEDs (2016) in Copenhagen, Denmark. We calculated the coverage loss associated with limited temporal accessibility of registered AEDs, and used a spatiotemporal optimization model to quantify the potential coverage gain of optimized AED deployment. Coverage gain of spatiotemporal deployment over a spatial-only solution was quantified through 10-fold cross-validation. Statistical testing was performed using χ^2 and McNemar's tests. 
Results: We found 2149 public OHCAs and 1573 registered AED locations. Coverage loss was found to be 24.4% (1104 OHCAs covered under assumed 24/7 coverage, and 835 OHCAs under actual coverage). The coverage gain from using the spatiotemporal model over a spatial-only approach was 15.3%. Temporal and geographical trends in coverage gain were similar to Toronto. Conclusions: Without modification, a previously developed spatiotemporal AED optimization approach was applied to Copenhagen, resulting in similar OHCA coverage findings as Toronto, despite large geographic and cultural differences between the two cities. In addition to reinforcing the importance of temporal accessibility of AEDs, these similarities demonstrate the generalizability of optimization approaches to improve AED placement and accessibility. |
46. | A Bayesian regression approach to handicapping tennis players based on a rating system Journal Article T. C. Y. Chan, R. Singal Journal of Quantitative Analysis in Sports, Vol. 14, pp. 131-141, 2018. @article{ChanTCY.J046, title = {A Bayesian regression approach to handicapping tennis players based on a rating system}, author = {T. C. Y. Chan and R. Singal}, doi = {10.1515/jqas-2017-0103}, year = {2018}, date = {2018-08-01}, journal = {Journal of Quantitative Analysis in Sports}, volume = {14}, pages = {131-141}, abstract = {This paper builds on a recently developed Markov Decision Process-based (MDP) handicap system for tennis, which aims to make amateur matches more competitive. The system gives points to the weaker player based on skill difference, which is measured by the point-win probability. However, estimating point-win probabilities at the amateur level is challenging since point-level data is generally only available at the professional level. On the other hand, tennis rating systems are widely used and provide an estimate of the difference in ability between players, but a rigorous determination of handicap using rating systems is lacking. Therefore, our goal is to develop a mapping between the Universal Tennis Rating (UTR) system and the MDP-based handicaps, so that two amateur players can determine an appropriate handicap for their match based only on their UTRs. We first develop and validate an approach to extract server-independent point-win probabilities from match scores. Then, we show how to map server-independent point-win probabilities to server-specific point-win probabilities. Finally, we use the estimated probabilities to produce handicaps via the MDP model, which are regressed against UTR differences between pairs of players. We conclude with thoughts on how a handicap system could be implemented in practice.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper builds on a recently developed Markov Decision Process-based (MDP) handicap system for tennis, which aims to make amateur matches more competitive. The system gives points to the weaker player based on skill difference, which is measured by the point-win probability. However, estimating point-win probabilities at the amateur level is challenging since point-level data is generally only available at the professional level. On the other hand, tennis rating systems are widely used and provide an estimate of the difference in ability between players, but a rigorous determination of handicap using rating systems is lacking. Therefore, our goal is to develop a mapping between the Universal Tennis Rating (UTR) system and the MDP-based handicaps, so that two amateur players can determine an appropriate handicap for their match based only on their UTRs. We first develop and validate an approach to extract server-independent point-win probabilities from match scores. Then, we show how to map server-independent point-win probabilities to server-specific point-win probabilities. Finally, we use the estimated probabilities to produce handicaps via the MDP model, which are regressed against UTR differences between pairs of players. We conclude with thoughts on how a handicap system could be implemented in practice. |
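Illustrative sketch (not the paper's MDP model or UTR mapping): the textbook closed-form below converts a point-win probability into a game-win probability, which conveys why small point-level skill gaps compound into large match-level gaps and hence justify a handicap.

```python
# Minimal sketch: standard probability of holding a game given a point-win
# probability p (win by two from deuce). Used only to illustrate how point-level
# edges compound; this is not the paper's methodology.
def game_win_prob(p: float) -> float:
    q = 1.0 - p
    before_deuce = p**4 * (1 + 4*q + 10*q**2)   # win the game 4-0, 4-1, or 4-2
    reach_deuce = 20 * p**3 * q**3              # reach 3-3
    win_from_deuce = p**2 / (p**2 + q**2)       # eventually win two points in a row
    return before_deuce + reach_deuce * win_from_deuce

for p in (0.50, 0.52, 0.55, 0.60):
    print(f"point-win prob {p:.2f} -> game-win prob {game_win_prob(p):.3f}")
```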
45. | A small number of objective function weight vectors is sufficient for automated treatment planning in prostate cancer Journal Article A. Goli, J. J. Boutilier, M. B. Sharpe, T. Craig, T. C. Y. Chan Physics in Medicine and Biology, Vol. 63(Article No. 195004), 2018. @article{ChanTCY.J045, title = {A small number of objective function weight vectors is sufficient for automated treatment planning in prostate cancer}, author = {A. Goli and J. J. Boutilier and M. B. Sharpe and T. Craig and T. C. Y. Chan}, doi = {10.1088/1361-6560/aad2f0}, year = {2018}, date = {2018-07-11}, journal = {Physics in Medicine and Biology}, volume = {63}, number = {Article No. 195004}, abstract = {Current practice for treatment planning optimization can be both inefficient and time consuming. In this paper, we propose an automated planning methodology that aims to combine both explorative and prescriptive approaches for improving the efficiency and the quality of the treatment planning process. Given a treatment plan, our explorative approach explores trade-offs between different objectives and finds an acceptable region for objective function weights via inverse optimization. Intuitively, the shape and size of these regions describe how 'sensitive' a patient is to perturbations in objective function weights. We then develop an integer programming-based prescriptive approach that exploits the information encoded by these regions to find a set of five representative objective function weight vectors such that for each patient there exists at least one representative weight vector that can produce a high quality treatment plan. Using 315 patients from Princess Margaret Cancer Centre, we show that the produced treatment plans are comparable and, for 96% of cases, improve upon the inversely optimized plans that are generated from the historical clinical treatment plans.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Current practice for treatment planning optimization can be both inefficient and time consuming. In this paper, we propose an automated planning methodology that aims to combine both explorative and prescriptive approaches for improving the efficiency and the quality of the treatment planning process. Given a treatment plan, our explorative approach explores trade-offs between different objectives and finds an acceptable region for objective function weights via inverse optimization. Intuitively, the shape and size of these regions describe how 'sensitive' a patient is to perturbations in objective function weights. We then develop an integer programming-based prescriptive approach that exploits the information encoded by these regions to find a set of five representative objective function weight vectors such that for each patient there exists at least one representative weight vector that can produce a high quality treatment plan. Using 315 patients from Princess Margaret Cancer Centre, we show that the produced treatment plans are comparable and, for 96% of cases, improve upon the inversely optimized plans that are generated from the historical clinical treatment plans. |
44. | Inverse optimization of objective function weights for treatment planning using clinical dose-volume histograms Journal Article A. Babier, J. J. Boutilier, M. B. Sharpe, A. L. McNiven, T. C. Y. Chan Physics in Medicine and Biology, Vol. 63(Article No. 105004), 2018. @article{ChanTCY.J044, title = {Inverse optimization of objective function weights for treatment planning using clinical dose-volume histograms}, author = {A. Babier and J. J. Boutilier and M. B. Sharpe and A. L. McNiven and T. C. Y. Chan}, doi = {10.1088/1361-6560/aabd14}, year = {2018}, date = {2018-04-10}, journal = {Physics in Medicine and Biology}, volume = {63}, number = {Article No. 105004}, abstract = {We developed and evaluated a novel inverse optimization (IO) model to estimate objective function weights from clinical dose-volume histograms (DVHs). These weights were used to solve a treatment planning problem to generate 'inverse plans' that had similar DVHs to the original clinical DVHs. Our methodology was applied to 217 clinical head and neck cancer treatment plans that were previously delivered at Princess Margaret Cancer Centre in Canada. Inverse plan DVHs were compared to the clinical DVHs using objective function values, dose-volume differences, and frequency of clinical planning criteria satisfaction. Median differences between the clinical and inverse DVHs were within 1.1 Gy. For most structures, the difference in clinical planning criteria satisfaction between the clinical and inverse plans was at most 1.4%. For structures where the two plans differed by more than 1.4% in planning criteria satisfaction, the difference in average criterion violation was less than 0.5 Gy. Overall, the inverse plans were very similar to the clinical plans. Compared with a previous inverse optimization method from the literature, our new inverse plans typically satisfied the same or more clinical criteria, and had consistently lower fluence heterogeneity. Overall, this paper demonstrates that DVHs, which are essentially summary statistics, provide sufficient information to estimate objective function weights that result in high quality treatment plans. However, as with any summary statistic that compresses three-dimensional dose information, care must be taken to avoid generating plans with undesirable features such as hotspots; our computational results suggest that such undesirable spatial features were uncommon. Our IO-based approach can be integrated into the current clinical planning paradigm to better initialize the planning process and improve planning efficiency. It could also be embedded in a knowledge-based planning or adaptive radiation therapy framework to automatically generate a new plan given a predicted or updated target DVH, respectively.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We developed and evaluated a novel inverse optimization (IO) model to estimate objective function weights from clinical dose-volume histograms (DVHs). These weights were used to solve a treatment planning problem to generate 'inverse plans' that had similar DVHs to the original clinical DVHs. Our methodology was applied to 217 clinical head and neck cancer treatment plans that were previously delivered at Princess Margaret Cancer Centre in Canada. Inverse plan DVHs were compared to the clinical DVHs using objective function values, dose-volume differences, and frequency of clinical planning criteria satisfaction. Median differences between the clinical and inverse DVHs were within 1.1 Gy. 
For most structures, the difference in clinical planning criteria satisfaction between the clinical and inverse plans was at most 1.4%. For structures where the two plans differed by more than 1.4% in planning criteria satisfaction, the difference in average criterion violation was less than 0.5 Gy. Overall, the inverse plans were very similar to the clinical plans. Compared with a previous inverse optimization method from the literature, our new inverse plans typically satisfied the same or more clinical criteria, and had consistently lower fluence heterogeneity. Overall, this paper demonstrates that DVHs, which are essentially summary statistics, provide sufficient information to estimate objective function weights that result in high quality treatment plans. However, as with any summary statistic that compresses three-dimensional dose information, care must be taken to avoid generating plans with undesirable features such as hotspots; our computational results suggest that such undesirable spatial features were uncommon. Our IO-based approach can be integrated into the current clinical planning paradigm to better initialize the planning process and improve planning efficiency. It could also be embedded in a knowledge-based planning or adaptive radiation therapy framework to automatically generate a new plan given a predicted or updated target DVH, respectively. |
43. | Knowledge-based automated planning for oropharyngeal cancer Journal Article A. Babier, J. J. Boutilier, A. L. McNiven, T. C. Y. Chan Medical Physics, Vol. 45, pp. 2875-2883, 2018. @article{ChanTCY.J043, title = {Knowledge-based automated planning for oropharyngeal cancer}, author = {A. Babier and J. J. Boutilier and A. L. McNiven and T. C. Y. Chan}, doi = {10.1002/mp.12930}, year = {2018}, date = {2018-04-05}, journal = {Medical Physics}, volume = {45}, pages = {2875-2883}, abstract = {Purpose: The purpose of this study was to automatically generate radiation therapy plans for oropharynx patients by combining knowledge-based planning (KBP) predictions with an inverse optimization (IO) pipeline. Methods: We developed two KBP approaches, the bagging query (BQ) method and the generalized principal component analysis-based (gPCA) method, to predict achievable dose–volume histograms (DVHs). These approaches generalize existing methods by predicting physically feasible organ-at-risk (OAR) and target DVHs in sites with multiple targets. Using leave-one-out cross validation, we applied both models to a large dataset of 217 oropharynx patients. The predicted DVHs were input into an IO pipeline that generated treatment plans (BQ and gPCA plans) via an intermediate step that estimated objective function weights for an inverse planning model. The KBP predictions were compared to the clinical DVHs for benchmarking. To assess the complete pipeline, we compared the BQ and gPCA plans to both the predictions and clinical plans. To isolate the effect of the KBP predictions, we put clinical DVHs through the IO pipeline to produce clinical inverse optimized (CIO) plans. This approach also allowed us to estimate the complexity of the clinical plans. The BQ and gPCA plans were benchmarked against the CIO plans using DVH differences and clinical planning criteria. Iso-complexity plans (relative to CIO) were also generated and evaluated. Results: The BQ method tended to predict that less dose is delivered than what was observed in the clinical plans while the gPCA predictions were more similar to clinical DVHs. Both populations of KBP predictions were reproduced with inverse plans to within a median DVH difference of 3 Gy. Clinical planning criteria for OARs were satisfied most frequently by the BQ plans (74.4%), by 6.3% points more than the clinical plans. Meanwhile, target criteria were satisfied most frequently by the gPCA plans (90.2%), and by 21.2% points more than clinical plans. However, once the complexity of the plans was constrained to that of the CIO plans, the performance of the BQ plans degraded significantly. In contrast, the gPCA plans still satisfied more clinical criteria than both the clinical and CIO plans, with the most notable improvement being in target criteria. Conclusion: Our automated pipeline can successfully use DVH predictions to generate high-quality plans without human intervention. Between the two KBP methods, gPCA plans tend to achieve comparable performance as clinical plans, even when controlling for plan complexity, whereas BQ plans tended to underperform.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: The purpose of this study was to automatically generate radiation therapy plans for oropharynx patients by combining knowledge-based planning (KBP) predictions with an inverse optimization (IO) pipeline. 
Methods: We developed two KBP approaches, the bagging query (BQ) method and the generalized principal component analysis-based (gPCA) method, to predict achievable dose–volume histograms (DVHs). These approaches generalize existing methods by predicting physically feasible organ-at-risk (OAR) and target DVHs in sites with multiple targets. Using leave-one-out cross validation, we applied both models to a large dataset of 217 oropharynx patients. The predicted DVHs were input into an IO pipeline that generated treatment plans (BQ and gPCA plans) via an intermediate step that estimated objective function weights for an inverse planning model. The KBP predictions were compared to the clinical DVHs for benchmarking. To assess the complete pipeline, we compared the BQ and gPCA plans to both the predictions and clinical plans. To isolate the effect of the KBP predictions, we put clinical DVHs through the IO pipeline to produce clinical inverse optimized (CIO) plans. This approach also allowed us to estimate the complexity of the clinical plans. The BQ and gPCA plans were benchmarked against the CIO plans using DVH differences and clinical planning criteria. Iso-complexity plans (relative to CIO) were also generated and evaluated. Results: The BQ method tended to predict that less dose is delivered than what was observed in the clinical plans while the gPCA predictions were more similar to clinical DVHs. Both populations of KBP predictions were reproduced with inverse plans to within a median DVH difference of 3 Gy. Clinical planning criteria for OARs were satisfied most frequently by the BQ plans (74.4%), by 6.3% points more than the clinical plans. Meanwhile, target criteria were satisfied most frequently by the gPCA plans (90.2%), and by 21.2% points more than clinical plans. However, once the complexity of the plans was constrained to that of the CIO plans, the performance of the BQ plans degraded significantly. In contrast, the gPCA plans still satisfied more clinical criteria than both the clinical and CIO plans, with the most notable improvement being in target criteria. Conclusion: Our automated pipeline can successfully use DVH predictions to generate high-quality plans without human intervention. Between the two KBP methods, gPCA plans tend to achieve comparable performance as clinical plans, even when controlling for plan complexity, whereas BQ plans tended to underperform. |
42. | Women's College Hospital uses Operations Research to create an ambulatory clinic schedule Journal Article B. K. Eagen, T. C. Y. Chan, M. W. Carter Service Science, Vol. 10, pp. 230-240, 2018. @article{ChanTCY.J042, title = {Women's College Hospital uses Operations Research to create an ambulatory clinic schedule}, author = {B. K. Eagen and T. C. Y. Chan and M. W. Carter}, doi = {10.1287/serv.2018.0221}, year = {2018}, date = {2018-04-01}, journal = {Service Science}, volume = {10}, pages = {230-240}, abstract = {Women's College Hospital (WCH) in Toronto, Canada, offers roughly 300 outpatient clinics every week. In April 2011, we started working with WCH to design a new schedule for their clinics, to accommodate a move to a new hospital building that was completed in May 2013. We developed an integer programming model to optimize the assignment of clinics to timeslots and locations, based on the desire to minimize changes from the historical schedule. In cooperation with senior leadership, we tested multiple scenarios that explored changes to space utilization policies at WCH and ultimately generated a new clinic schedule, which they implemented in May 2013. In this paper, we highlight the value our work has created for WCH and present the lessons we learned in development of the model and through our collaboration with the WCH team.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Women's College Hospital (WCH) in Toronto, Canada, offers roughly 300 outpatient clinics every week. In April 2011, we started working with WCH to design a new schedule for their clinics, to accommodate a move to a new hospital building that was completed in May 2013. We developed an integer programming model to optimize the assignment of clinics to timeslots and locations, based on the desire to minimize changes from the historical schedule. In cooperation with senior leadership, we tested multiple scenarios that explored changes to space utilization policies at WCH and ultimately generated a new clinic schedule, which they implemented in May 2013. In this paper, we highlight the value our work has created for WCH and present the lessons we learned in development of the model and through our collaboration with the WCH team. |
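The scheduling work above assigns clinics to timeslots and locations while minimizing deviation from the historical schedule. As a rough illustration of that style of assignment formulation (not the authors' actual model; the clinic list, rooms, and historical slots below are invented), a small integer program in PuLP might look like:

```python
# Illustrative sketch only: a toy clinic-to-(timeslot, room) assignment IP in the
# spirit of the paper's model. All data (clinics, rooms, historical slots) are made up.
import pulp

clinics = ["gyn", "derm", "cardio", "ortho"]
timeslots = ["Mon-AM", "Mon-PM", "Tue-AM", "Tue-PM"]
rooms = ["R1", "R2"]
# Hypothetical historical assignment; deviating from it is penalized.
historical = {"gyn": ("Mon-AM", "R1"), "derm": ("Mon-PM", "R1"),
              "cardio": ("Tue-AM", "R2"), "ortho": ("Tue-PM", "R2")}

model = pulp.LpProblem("clinic_schedule", pulp.LpMinimize)
x = pulp.LpVariable.dicts("x", (clinics, timeslots, rooms), cat=pulp.LpBinary)

# Each clinic gets exactly one (timeslot, room).
for c in clinics:
    model += pulp.lpSum(x[c][t][r] for t in timeslots for r in rooms) == 1
# At most one clinic per (timeslot, room).
for t in timeslots:
    for r in rooms:
        model += pulp.lpSum(x[c][t][r] for c in clinics) <= 1
# Objective: number of clinics moved away from their historical slot and room.
model += pulp.lpSum(1 - x[c][historical[c][0]][historical[c][1]] for c in clinics)

model.solve(pulp.PULP_CBC_CMD(msg=False))
for c in clinics:
    for t in timeslots:
        for r in rooms:
            if x[c][t][r].value() == 1:
                print(c, "->", t, r)
```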
41. | A mixed-integer optimization approach for homogeneous magnet design Journal Article I. Dayarian, T. C. Y. Chan, D. Jaffray, T. Stanescu Technology, Vol. 6, pp. 49-58, 2018. @article{ChanTCY.J041, title = {A mixed-integer optimization approach for homogeneous magnet design}, author = {I. Dayarian and T. C. Y. Chan and D. Jaffray and T. Stanescu}, doi = {10.1142/S2339547818500036}, year = {2018}, date = {2018-03-24}, journal = {Technology}, volume = {6}, pages = {49-58}, abstract = {Magnetic resonance imaging (MRI) is a powerful diagnostic tool that has become the imaging modality of choice for soft-tissue visualization in radiation therapy. Emerging technologies aim to integrate MRI with a medical linear accelerator to form novel cancer therapy systems (MR-linac), but the design of these systems to date relies on heuristic procedures. This paper develops an exact, optimization-based approach for magnet design that 1) incorporates the most accurate physics calculations to date, 2) determines precisely the relative spatial location, size, and current magnitude of the magnetic coils, 3) guarantees field homogeneity inside the imaging volume, 4) produces configurations that satisfy, for the first time, small-footprint feasibility constraints required for MR-linacs. Our approach leverages modern mixed-integer programming (MIP), enabling significant flexibility in magnet design generation, e.g., controlling the number of coils and enforcing symmetry between magnet poles. Our numerical results demonstrate the superiority of our method versus current mainstream methods.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Magnetic resonance imaging (MRI) is a powerful diagnostic tool that has become the imaging modality of choice for soft-tissue visualization in radiation therapy. Emerging technologies aim to integrate MRI with a medical linear accelerator to form novel cancer therapy systems (MR-linac), but the design of these systems to date relies on heuristic procedures. This paper develops an exact, optimization-based approach for magnet design that 1) incorporates the most accurate physics calculations to date, 2) determines precisely the relative spatial location, size, and current magnitude of the magnetic coils, 3) guarantees field homogeneity inside the imaging volume, 4) produces configurations that satisfy, for the first time, small-footprint feasibility constraints required for MR-linacs. Our approach leverages modern mixed-integer programming (MIP), enabling significant flexibility in magnet design generation, e.g., controlling the number of coils and enforcing symmetry between magnet poles. Our numerical results demonstrate the superiority of our method versus current mainstream methods. |
40. | Trade-off preservation in inverse multi-objective convex optimization Journal Article T. C. Y. Chan, T. Lee European Journal of Operational Research, Vol. 270, pp. 25-39, 2018. @article{ChanTCY.J040, title = {Trade-off preservation in inverse multi-objective convex optimization}, author = {T. C. Y. Chan and T. Lee}, doi = {10.1016/j.ejor.2018.02.045}, year = {2018}, date = {2018-02-28}, journal = {European Journal of Operational Research}, volume = {270}, pages = {25-39}, abstract = {Given an input solution that may not be Pareto optimal, we present a new inverse optimization methodology for multi-objective convex optimization that determines a weight vector producing a weakly Pareto optimal solution that preserves the decision maker's trade-off intention encoded in the input solution. We introduce a notion of trade-off preservation, which we use as a measure of similarity for approximating the input solution, and show its connection with minimizing an optimality gap. We propose a linear approximation to the inverse model and a successive linear programming algorithm that balance between trade-off preservation and computational efficiency, and show that our model encompasses many of the existing inverse optimization models from the literature. We demonstrate the proposed method using clinical data from prostate cancer radiation therapy.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Given an input solution that may not be Pareto optimal, we present a new inverse optimization methodology for multi-objective convex optimization that determines a weight vector producing a weakly Pareto optimal solution that preserves the decision maker's trade-off intention encoded in the input solution. We introduce a notion of trade-off preservation, which we use as a measure of similarity for approximating the input solution, and show its connection with minimizing an optimality gap. We propose a linear approximation to the inverse model and a successive linear programming algorithm that balance between trade-off preservation and computational efficiency, and show that our model encompasses many of the existing inverse optimization models from the literature. We demonstrate the proposed method using clinical data from prostate cancer radiation therapy. |
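As context for the "optimality gap" connection mentioned in the abstract, a generic weighted-sum inverse problem (a sketch consistent with the abstract, not necessarily the exact model in the paper) can be stated as follows: given an observed solution x^0 and objectives f_1,…,f_K, find nonnegative weights summing to one that make x^0 as close to weighted-sum optimal as possible,

```latex
\min_{w \ge 0,\; \sum_{k} w_k = 1} \;\; \sum_{k=1}^{K} w_k f_k(x^0) \;-\; \min_{x \in X} \sum_{k=1}^{K} w_k f_k(x).
```

The quantity being minimized is the optimality gap of x^0 under the weights w; a gap of zero certifies that x^0 is weakly Pareto optimal for those weights.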
39. | Improving fairness in match play golf through enhanced handicap allocation Journal Article T. C. Y. Chan, D. Madras, M. L. Puterman Journal of Sports Analytics, Vol. 4, pp. 251-262, 2018. @article{ChanTCY.J039, title = {Improving fairness in match play golf through enhanced handicap allocation}, author = {T. C. Y. Chan and D. Madras and M. L. Puterman}, doi = {10.3233/JSA-180184}, year = {2018}, date = {2018-02-03}, journal = {Journal of Sports Analytics}, volume = {4}, pages = {251-262}, abstract = {In amateur golf, lower handicap players "give strokes" to higher handicap players based on their handicap differential to make head-to-head matches fairer. In match play, the standard way to allocate handicap strokes uses the "course-defined hole ranking". Using a bootstrapped simulation of over 70,000 matches based on 392 rounds of golf, we first show that the standard stroke allocation method and course-defined hole ranking favor the better player in 53% of matches. Then, we investigate the impact of three potential changes to stroke allocation: modifying the hole ranking; giving both players their full handicaps instead of using handicap differential; awarding extra strokes to the weaker player. Our two primary findings are: 1) fair matches can be achieved by giving the weaker player 0.5 extra strokes, which corresponds to a tie-breaker on a single hole; 2) giving both players their full handicap makes the fairness results robust to different hole rankings. Together, these simple changes can improve fairness in match play golf and improve generalizability to other courses.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In amateur golf, lower handicap players "give strokes" to higher handicap players based on their handicap differential to make head-to-head matches fairer. In match play, the standard way to allocate handicap strokes uses the "course-defined hole ranking". Using a bootstrapped simulation of over 70,000 matches based on 392 rounds of golf, we first show that the standard stroke allocation method and course-defined hole ranking favor the better player in 53% of matches. Then, we investigate the impact of three potential changes to stroke allocation: modifying the hole ranking; giving both players their full handicaps instead of using handicap differential; awarding extra strokes to the weaker player. Our two primary findings are: 1) fair matches can be achieved by giving the weaker player 0.5 extra strokes, which corresponds to a tie-breaker on a single hole; 2) giving both players their full handicap makes the fairness results robust to different hole rankings. Together, these simple changes can improve fairness in match play golf and improve generalizability to other courses. |
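To make the simulation idea concrete, here is a minimal, self-contained sketch of match-play simulation with handicap strokes allocated by a hole ranking. The score distributions, hole ranking, and handicap values below are invented for illustration; they are not the 392-round dataset or bootstrap procedure used in the paper.

```python
# Illustrative Monte Carlo sketch of match play with handicap strokes.
# Synthetic score distributions only; not the bootstrap of real rounds used in the study.
import numpy as np

rng = np.random.default_rng(0)
n_holes, n_matches = 18, 20000
hole_rank = rng.permutation(np.arange(1, n_holes + 1))  # hypothetical course-defined ranking
handicap_diff = 6          # strokes given to the weaker player (hypothetical)
extra = 0.5                # extra strokes tested in the paper (0.5 ~ one tie-breaker hole)

def simulate(strokes_given):
    full, half = int(np.floor(strokes_given)), strokes_given % 1 > 0
    better_wins = 0.0
    for _ in range(n_matches):
        # Hypothetical per-hole gross scores; the weaker player averages more strokes.
        s_better = 3 + rng.poisson(1.2, n_holes)
        s_weaker = 3 + rng.poisson(1.55, n_holes)
        strokes = (hole_rank <= full).astype(float)       # one stroke on the 'full' hardest holes
        holes_won = np.sign((s_weaker - strokes) - s_better)  # +1 when the better player wins a hole
        margin = holes_won.sum()
        if margin == 0 and half:
            margin = -0.5                                  # simplified: half-stroke breaks a tied match
        if margin > 0:
            better_wins += 1
        elif margin == 0:
            better_wins += 0.5                             # split tied matches
    return better_wins / n_matches

print("better player win rate, standard allocation:", simulate(handicap_diff))
print("better player win rate, +0.5 extra strokes :", simulate(handicap_diff + extra))
```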
38. | Robust defibrillator deployment under cardiac arrest location uncertainty via row-and-column generation Journal Article T. C. Y. Chan, Z.-J. Shen, A. Siddiq Operations Research, Vol. 66, pp. 358-379, 2018. @article{ChanTCY.J038, title = {Robust defibrillator deployment under cardiac arrest location uncertainty via row-and-column generation}, author = {T. C. Y. Chan and Z.-J. Shen and A. Siddiq}, doi = {10.1287/opre.2017.1660}, year = {2018}, date = {2018-01-01}, journal = {Operations Research}, volume = {66}, pages = {358-379}, abstract = {Sudden cardiac arrest is a significant public health concern. Successful treatment of cardiac arrest is extremely time sensitive, and use of an automated external defibrillator (AED) where possible significantly increases the probability of survival. Placement of AEDs in public locations can improve survival by enabling bystanders to treat victims of cardiac arrest prior to the arrival of emergency medical responders, thus shortening the time between collapse and treatment. However, since the exact locations of future cardiac arrests cannot be known a priori, AEDs must be placed strategically in public locations to ensure their accessibility in the event of an out-of-hospital cardiac arrest emergency. In this paper, we propose a data-driven optimization model for deploying AEDs in public spaces while accounting for uncertainty in future cardiac arrest locations. Our approach involves discretizing a continuous service area into a large set of scenarios, where the probability of cardiac arrest at each location is itself uncertain. We model uncertainty in the spatial risk of cardiac arrest using a polyhedral uncertainty set that we calibrate using historical cardiac arrest data. We propose a solution technique based on row-and-column generation that exploits the structure of the uncertainty set, allowing the algorithm to scale gracefully with the total number of scenarios. Using real cardiac arrest data from the City of Toronto, we conduct an extensive numerical study on AED deployment public locations. We find that hedging against cardiac arrest location uncertainty can produce AED deployments that outperform an intuitive sample average approximation by 9%–15% and cuts the performance gap with respect to an ex post model by half. Our findings suggest that accounting for cardiac arrest location uncertainty can lead to improved accessibility of AEDs during cardiac arrest emergencies and the potential for improved survival outcomes.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Sudden cardiac arrest is a significant public health concern. Successful treatment of cardiac arrest is extremely time sensitive, and use of an automated external defibrillator (AED) where possible significantly increases the probability of survival. Placement of AEDs in public locations can improve survival by enabling bystanders to treat victims of cardiac arrest prior to the arrival of emergency medical responders, thus shortening the time between collapse and treatment. However, since the exact locations of future cardiac arrests cannot be known a priori, AEDs must be placed strategically in public locations to ensure their accessibility in the event of an out-of-hospital cardiac arrest emergency. In this paper, we propose a data-driven optimization model for deploying AEDs in public spaces while accounting for uncertainty in future cardiac arrest locations. 
Our approach involves discretizing a continuous service area into a large set of scenarios, where the probability of cardiac arrest at each location is itself uncertain. We model uncertainty in the spatial risk of cardiac arrest using a polyhedral uncertainty set that we calibrate using historical cardiac arrest data. We propose a solution technique based on row-and-column generation that exploits the structure of the uncertainty set, allowing the algorithm to scale gracefully with the total number of scenarios. Using real cardiac arrest data from the City of Toronto, we conduct an extensive numerical study on AED deployment in public locations. We find that hedging against cardiac arrest location uncertainty can produce AED deployments that outperform an intuitive sample average approximation by 9%–15% and cut the performance gap with respect to an ex post model by half. Our findings suggest that accounting for cardiac arrest location uncertainty can lead to improved accessibility of AEDs during cardiac arrest emergencies and the potential for improved survival outcomes. |
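At a high level (a sketch consistent with the abstract, not the paper's full formulation), the robust deployment problem has a max–min structure: choose AED locations y to maximize worst-case expected coverage over an uncertainty set P of arrest-location probability distributions,

```latex
\max_{y \in Y} \; \min_{p \in P} \; \sum_{i=1}^{n} p_i \, c_i(y),
\qquad
c_i(y) =
\begin{cases}
1 & \text{if scenario } i \text{ lies within coverage range of an AED placed under } y,\\
0 & \text{otherwise.}
\end{cases}
```

Row-and-column generation then alternates between solving the problem over a subset of candidate worst-case distributions and adding the distribution that attains the inner minimum for the current y.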
2017 |
|
37. | Increased cardiac arrest survival and bystander intervention in enclosed pedestrian walkway systems Journal Article M. Lee, D. Demirtas, J. E. Buick, M. J. Feldman, S. Cheskes, L. J. Morrison, T. C. Y. Chan Resuscitation, Vol. 118, pp. 1-7, 2017. @article{ChanTCY.J037, title = {Increased cardiac arrest survival and bystander intervention in enclosed pedestrian walkway systems}, author = {M. Lee and D. Demirtas and J. E. Buick and M. J. Feldman and S. Cheskes and L. J. Morrison and T. C. Y. Chan}, doi = {10.1016/j.resuscitation.2017.06.013}, year = {2017}, date = {2017-05-20}, journal = {Resuscitation}, volume = {118}, pages = {1-7}, abstract = {Background: Cities worldwide have underground or above-ground enclosed walkway systems for pedestrian travel, representing unique environments for studying out-of-hospital cardiac arrests (OHCAs). The characteristics and outcomes of OHCAs that occur in such systems are unknown. Objective: To determine whether OHCAs occurring in enclosed pedestrian walkway systems have differing demographics, prehospital intervention, and survival outcomes compared to the encompassing city, by examining the PATH walkway system in Toronto. Methods: We identified all atraumatic, public-location OHCAs in Toronto from April 2006 to March 2016. Exclusion criteria were obvious death, existing DNR, and EMS-witnessed OHCAs. OHCAs were classified into mutually exclusive location groups: Toronto, Downtown, and PATH-accessible. PATH-accessible OHCAs were those that occurred within the PATH system between the first basement and third floor. We analyzed demographic, prehospital intervention, and survival data using t-tests and chi-squared tests. Results: We identified 2172 OHCAs: 1752 Toronto, 371 Downtown, and 49 PATH-accessible. Compared to Toronto, a significantly higher proportion of PATH-accessible OHCAs was bystander-witnessed (62.6% vs 83.7%, p = 0.003), had bystander CPR (56.6% vs 73.5%, p = 0.019), bystander AED use (11.0% vs 42.6%, p<0.001), shockable initial rhythm (45.5% vs 72.9%, p<0.001), and overall survival (18.5% vs 33.3%, p = 0.009). Similar significant differences were observed when compared to Downtown. Conclusions: This study suggests that OHCAs in enclosed pedestrian walkway systems are uniquely different from other public settings. Bystander resuscitation efforts are significantly more frequent and survival rates are significantly higher. Urban planners in similar infrastructure systems worldwide should consider these findings when determining AED placement and public engagement strategies.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Cities worldwide have underground or above-ground enclosed walkway systems for pedestrian travel, representing unique environments for studying out-of-hospital cardiac arrests (OHCAs). The characteristics and outcomes of OHCAs that occur in such systems are unknown. Objective: To determine whether OHCAs occurring in enclosed pedestrian walkway systems have differing demographics, prehospital intervention, and survival outcomes compared to the encompassing city, by examining the PATH walkway system in Toronto. Methods: We identified all atraumatic, public-location OHCAs in Toronto from April 2006 to March 2016. Exclusion criteria were obvious death, existing DNR, and EMS-witnessed OHCAs. OHCAs were classified into mutually exclusive location groups: Toronto, Downtown, and PATH-accessible. 
PATH-accessible OHCAs were those that occurred within the PATH system between the first basement and third floor. We analyzed demographic, prehospital intervention, and survival data using t-tests and chi-squared tests. Results: We identified 2172 OHCAs: 1752 Toronto, 371 Downtown, and 49 PATH-accessible. Compared to Toronto, a significantly higher proportion of PATH-accessible OHCAs was bystander-witnessed (62.6% vs 83.7%, p = 0.003), had bystander CPR (56.6% vs 73.5%, p = 0.019), bystander AED use (11.0% vs 42.6%, p<0.001), shockable initial rhythm (45.5% vs 72.9%, p<0.001), and overall survival (18.5% vs 33.3%, p = 0.009). Similar significant differences were observed when compared to Downtown. Conclusions: This study suggests that OHCAs in enclosed pedestrian walkway systems are uniquely different from other public settings. Bystander resuscitation efforts are significantly more frequent and survival rates are significantly higher. Urban planners in similar infrastructure systems worldwide should consider these findings when determining AED placement and public engagement strategies. |
36. | Stability and continuity in robust optimization Journal Article T. C. Y. Chan, P. A. Mar SIAM Journal on Optimization, Vol. 27, pp. 817-841, 2017. @article{ChanTCY.J036, title = {Stability and continuity in robust optimization}, author = {T. C. Y. Chan and P. A. Mar}, doi = {10.1137/16M1067512}, year = {2017}, date = {2017-03-20}, journal = {SIAM Journal on Optimization}, volume = {27}, pages = {817-841}, abstract = {We consider the stability of robust optimization (RO) problems with respect to perturbations in their uncertainty sets. In particular, we focus on robust linear optimization problems, including those with an infinite number of constraints, and consider uncertainty in both the cost function and constraints. We prove Lipschitz continuity of the optimal value and ε-approximate optimal solution set with respect to the Hausdorff distance between uncertainty sets. The Lipschitz constant can be calculated from the problem data. In addition, we prove closedness and upper semicontinuity for the optimal solution set with respect to the uncertainty set. In order to prove these results, we develop a novel transformation that maps RO problems to linear semi-infinite optimization (LSIO) problems in such a way that the distance between uncertainty sets of two RO problems correspond to a measure of distance between their equivalent LSIO problems. Using this isometry we leverage LSIO and variational analysis stability results to obtain stability results for RO problems.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We consider the stability of robust optimization (RO) problems with respect to perturbations in their uncertainty sets. In particular, we focus on robust linear optimization problems, including those with an infinite number of constraints, and consider uncertainty in both the cost function and constraints. We prove Lipschitz continuity of the optimal value and ε-approximate optimal solution set with respect to the Hausdorff distance between uncertainty sets. The Lipschitz constant can be calculated from the problem data. In addition, we prove closedness and upper semicontinuity for the optimal solution set with respect to the uncertainty set. In order to prove these results, we develop a novel transformation that maps RO problems to linear semi-infinite optimization (LSIO) problems in such a way that the distance between uncertainty sets of two RO problems correspond to a measure of distance between their equivalent LSIO problems. Using this isometry we leverage LSIO and variational analysis stability results to obtain stability results for RO problems. |
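To give the flavor of the continuity results described above (a schematic statement only, with the exact constant and conditions deferred to the paper), the optimal value z*(U) of a robust problem with uncertainty set U satisfies a bound of the form

```latex
\lvert z^{*}(U_1) - z^{*}(U_2) \rvert \;\le\; \kappa \, d_H(U_1, U_2),
```

where d_H denotes the Hausdorff distance between the two uncertainty sets and κ is a Lipschitz constant computable from the problem data.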
35. | Operations Research in Global Health: A scoping review with a focus on the themes of health equity and impact Journal Article B. D. Bradley, T. Jung, A. Tandon-Verma, B. Khoury, T. C. Y. Chan, Y.-L. Cheng Health Research Policy and Systems, Vol. 15(Article No. 32), 2017. @article{ChanTCY.J035, title = {Operations Research in Global Health: A scoping review with a focus on the themes of health equity and impact}, author = {B. D. Bradley and T. Jung and A. Tandon-Verma and B. Khoury and T. C. Y. Chan and Y.-L. Cheng}, doi = {10.1186/s12961-017-0187-7}, year = {2017}, date = {2017-03-06}, journal = {Health Research Policy and Systems}, volume = {15}, number = {Article No. 32}, abstract = {Background: Operations research (OR) is a discipline that uses advanced analytical methods (e.g. simulation, optimisation, decision analysis) to better understand complex systems and aid in decision-making. Summary: Herein, we present a scoping review of the use of OR to analyse issues in global health, with an emphasis on health equity and research impact. A systematic search of five databases was designed to identify relevant published literature. A global overview of 1099 studies highlights the geographic distribution of OR and common OR methods used. From this collection of literature, a narrative description of the use of OR across four main application areas of global health – health systems and operations, clinical medicine, public health and health innovation – is also presented. The theme of health equity is then explored in detail through a subset of 44 studies. Health equity is a critical element of global health that cuts across all four application areas, and is an issue particularly amenable to analysis through OR. Finally, we present seven select cases of OR analyses that have been implemented or have influenced decision-making in global health policy or practice. Based on these cases, we identify three key drivers for success in bridging the gap between OR and global health policy, namely international collaboration with stakeholders, use of contextually appropriate data, and varied communication outlets for research findings. Such cases, however, represent a very small proportion of the literature found. Conclusion: Poor availability of representative and quality data, and a lack of collaboration between those who develop OR models and stakeholders in the contexts where OR analyses are intended to serve, were found to be common challenges for effective OR modelling in global health.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Operations research (OR) is a discipline that uses advanced analytical methods (e.g. simulation, optimisation, decision analysis) to better understand complex systems and aid in decision-making. Summary: Herein, we present a scoping review of the use of OR to analyse issues in global health, with an emphasis on health equity and research impact. A systematic search of five databases was designed to identify relevant published literature. A global overview of 1099 studies highlights the geographic distribution of OR and common OR methods used. From this collection of literature, a narrative description of the use of OR across four main application areas of global health – health systems and operations, clinical medicine, public health and health innovation – is also presented. The theme of health equity is then explored in detail through a subset of 44 studies. 
Health equity is a critical element of global health that cuts across all four application areas, and is an issue particularly amenable to analysis through OR. Finally, we present seven select cases of OR analyses that have been implemented or have influenced decision-making in global health policy or practice. Based on these cases, we identify three key drivers for success in bridging the gap between OR and global health policy, namely international collaboration with stakeholders, use of contextually appropriate data, and varied communication outlets for research findings. Such cases, however, represent a very small proportion of the literature found. Conclusion: Poor availability of representative and quality data, and a lack of collaboration between those who develop OR models and stakeholders in the contexts where OR analyses are intended to serve, were found to be common challenges for effective OR modelling in global health. |
34. | Optimizing a drone network to deliver automated external defibrillators Journal Article J. J. Boutilier, S. C. Brooks, A. Janmohamed, A. Byers, J. E. Buick, C. Zhan, A. P. Schoellig, S. Cheskes, L. J. Morrison, T. C. Y. Chan Circulation, Vol. 135, pp. 2454-2465, 2017. @article{ChanTCY.J034, title = {Optimizing a drone network to deliver automated external defibrillators}, author = {J. J. Boutilier and S. C. Brooks and A. Janmohamed and A. Byers and J. E. Buick and C. Zhan and A. P. Schoellig and S. Cheskes and L. J. Morrison and T. C. Y. Chan}, doi = {10.1161/CIRCULATIONAHA.116.026318}, year = {2017}, date = {2017-02-17}, journal = {Circulation}, volume = {135}, pages = {2454-2465}, abstract = {Background: Public access defibrillation programs can improve survival after out-of-hospital cardiac arrest, but automated external defibrillators (AEDs) are rarely available for bystander use at the scene. Drones are an emerging technology that can deliver an AED to the scene of an out-of-hospital cardiac arrest for bystander use. We hypothesize that a drone network designed with the aid of a mathematical model combining both optimization and queuing can reduce the time to AED arrival. Methods: We applied our model to 53 702 out-of-hospital cardiac arrests that occurred in the 8 regions of the Toronto Regional RescuNET between January 1, 2006, and December 31, 2014. Our primary analysis quantified the drone network size required to deliver an AED 1, 2, or 3 minutes faster than historical median 911 response times for each region independently. A secondary analysis quantified the reduction in drone resources required if RescuNET was treated as a large coordinated region. Results: The region-specific analysis determined that 81 bases and 100 drones would be required to deliver an AED ahead of median 911 response times by 3 minutes. In the most urban region, the 90th percentile of the AED arrival time was reduced by 6 minutes and 43 seconds relative to historical 911 response times in the region. In the most rural region, the 90th percentile was reduced by 10 minutes and 34 seconds. A single coordinated drone network across all regions required 39.5% fewer bases and 30.0% fewer drones to achieve similar AED delivery times. Conclusions: An optimized drone network designed with the aid of a novel mathematical model can substantially reduce the AED delivery time to an out-of-hospital cardiac arrest event.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Public access defibrillation programs can improve survival after out-of-hospital cardiac arrest, but automated external defibrillators (AEDs) are rarely available for bystander use at the scene. Drones are an emerging technology that can deliver an AED to the scene of an out-of-hospital cardiac arrest for bystander use. We hypothesize that a drone network designed with the aid of a mathematical model combining both optimization and queuing can reduce the time to AED arrival. Methods: We applied our model to 53 702 out-of-hospital cardiac arrests that occurred in the 8 regions of the Toronto Regional RescuNET between January 1, 2006, and December 31, 2014. Our primary analysis quantified the drone network size required to deliver an AED 1, 2, or 3 minutes faster than historical median 911 response times for each region independently. A secondary analysis quantified the reduction in drone resources required if RescuNET was treated as a large coordinated region. 
Results: The region-specific analysis determined that 81 bases and 100 drones would be required to deliver an AED ahead of median 911 response times by 3 minutes. In the most urban region, the 90th percentile of the AED arrival time was reduced by 6 minutes and 43 seconds relative to historical 911 response times in the region. In the most rural region, the 90th percentile was reduced by 10 minutes and 34 seconds. A single coordinated drone network across all regions required 39.5% fewer bases and 30.0% fewer drones to achieve similar AED delivery times. Conclusions: An optimized drone network designed with the aid of a novel mathematical model can substantially reduce the AED delivery time to an out-of-hospital cardiac arrest event. |
33. | Ranking businesses and municipal locations by spatiotemporal cardiac arrest risk to guide public defibrillator placement Journal Article C. L. F. Sun, S. C. Brooks, L. J. Morrison, T. C. Y. Chan Circulation, Vol. 135, pp. 1104-1119, 2017. @article{ChanTCY.J033, title = {Ranking businesses and municipal locations by spatiotemporal cardiac arrest risk to guide public defibrillator placement}, author = {C. L. F. Sun and S. C. Brooks and L. J. Morrison and T. C. Y. Chan}, doi = {10.1161/CIRCULATIONAHA.116.025349}, year = {2017}, date = {2017-01-17}, journal = {Circulation}, volume = {135}, pages = {1104-1119}, abstract = {Background: Efforts to guide automated external defibrillator placement for out-of-hospital cardiac arrest (OHCA) treatment have focused on identifying broadly defined location categories without considering hours of operation. Broad location categories may be composed of many businesses with varying accessibility. Identifying specific locations for automated external defibrillator deployment incorporating operating hours and time of OHCA occurrence may improve automated external defibrillator accessibility. We aim to identify specific businesses and municipal locations that maximize OHCA coverage on the basis of spatiotemporal assessment of OHCA risk in the immediate vicinity of franchise locations. Methods: This study was a retrospective population-based cohort study using data from the Toronto Regional RescuNET Epistry cardiac arrest database. We identified all nontraumatic public OHCAs occurring in Toronto, ON, Canada, from January 2007 through December 2015. We identified 41 unique businesses and municipal location types with ≥20 locations in Toronto from the YellowPages, Canadian Franchise Association, and the City of Toronto Open Data Portal. We obtained their geographic coordinates and hours of operation from Web sites, by phone, or in person. We determined the number of OHCAs that occurred within 100 m of each location when it was open (spatiotemporal coverage) for Toronto overall and downtown. The businesses and municipal locations were then ranked by spatiotemporal OHCA coverage. To evaluate temporal stability of the rankings, we calculated intraclass correlation of the annual coverage values. Results: There were 2654 nontraumatic public OHCAs. Tim Hortons ranked first in Toronto, covering 286 OHCAs. Starbucks ranked first in downtown, covering 110 OHCAs. Coffee shops and bank machines from the 5 largest Canadian banks occupied 8 of the top 10 spots in both Toronto and downtown. The rankings exhibited high temporal stability with intraclass correlation values of 0.88 (95% confidence interval, 0.83–0.93) in Toronto and 0.79 (95% confidence interval, 0.71–0.86) in downtown. Conclusions: We identified and ranked businesses and municipal locations by spatiotemporal OHCA risk in their immediate vicinity. This approach may help policy makers and funders to identify and prioritize potential partnerships for automated external defibrillator deployment in public-access defibrillator programs.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Efforts to guide automated external defibrillator placement for out-of-hospital cardiac arrest (OHCA) treatment have focused on identifying broadly defined location categories without considering hours of operation. Broad location categories may be composed of many businesses with varying accessibility. 
Identifying specific locations for automated external defibrillator deployment incorporating operating hours and time of OHCA occurrence may improve automated external defibrillator accessibility. We aim to identify specific businesses and municipal locations that maximize OHCA coverage on the basis of spatiotemporal assessment of OHCA risk in the immediate vicinity of franchise locations. Methods: This study was a retrospective population-based cohort study using data from the Toronto Regional RescuNET Epistry cardiac arrest database. We identified all nontraumatic public OHCAs occurring in Toronto, ON, Canada, from January 2007 through December 2015. We identified 41 unique businesses and municipal location types with ≥20 locations in Toronto from the YellowPages, Canadian Franchise Association, and the City of Toronto Open Data Portal. We obtained their geographic coordinates and hours of operation from Web sites, by phone, or in person. We determined the number of OHCAs that occurred within 100 m of each location when it was open (spatiotemporal coverage) for Toronto overall and downtown. The businesses and municipal locations were then ranked by spatiotemporal OHCA coverage. To evaluate temporal stability of the rankings, we calculated intraclass correlation of the annual coverage values. Results: There were 2654 nontraumatic public OHCAs. Tim Hortons ranked first in Toronto, covering 286 OHCAs. Starbucks ranked first in downtown, covering 110 OHCAs. Coffee shops and bank machines from the 5 largest Canadian banks occupied 8 of the top 10 spots in both Toronto and downtown. The rankings exhibited high temporal stability with intraclass correlation values of 0.88 (95% confidence interval, 0.83–0.93) in Toronto and 0.79 (95% confidence interval, 0.71–0.86) in downtown. Conclusions: We identified and ranked businesses and municipal locations by spatiotemporal OHCA risk in their immediate vicinity. This approach may help policy makers and funders to identify and prioritize potential partnerships for automated external defibrillator deployment in public-access defibrillator programs. |
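The spatiotemporal coverage computation described in the Methods (OHCAs within 100 m of a location while it is open) can be sketched as follows. The coordinates, opening hours, and arrest records below are fabricated placeholders; only the 100 m radius mirrors the abstract.

```python
# Minimal sketch of spatiotemporal coverage counting: an OHCA is "covered" by a
# candidate location if it occurred within 100 m while the location was open.
# All records below are fabricated for illustration.
from math import radians, sin, cos, asin, sqrt

def haversine_m(lat1, lon1, lat2, lon2):
    """Great-circle distance in metres."""
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371000 * asin(sqrt(a))

# (lat, lon, open_hour, close_hour) -- hypothetical franchise locations
locations = {"coffee_shop_A": (43.6510, -79.3835, 6, 23),
             "bank_branch_B": (43.6495, -79.3790, 9, 17)}
# (lat, lon, hour_of_day) -- hypothetical OHCA records
ohcas = [(43.6512, -79.3830, 14), (43.6497, -79.3794, 2), (43.6600, -79.4000, 12)]

coverage = {}
for name, (lat, lon, open_h, close_h) in locations.items():
    coverage[name] = sum(1 for (olat, olon, hr) in ohcas
                         if haversine_m(lat, lon, olat, olon) <= 100 and open_h <= hr < close_h)

for name, count in sorted(coverage.items(), key=lambda kv: -kv[1]):
    print(name, "covers", count, "OHCAs while open")
```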
32. | Rise and shock: Optimal defibrillator placement in a high-rise building Journal Article T. C. Y. Chan Prehospital Emergency Care, Vol. 21, pp. 309-314, 2017. @article{ChanTCY.J032, title = {Rise and shock: Optimal defibrillator placement in a high-rise building}, author = {T. C. Y. Chan}, doi = {10.1080/10903127.2016.1247202}, year = {2017}, date = {2017-01-01}, journal = {Prehospital Emergency Care}, volume = {21}, pages = {309-314}, abstract = {Objective: Out-of-hospital cardiac arrests (OHCA) in high-rise buildings experience lower survival and longer delays until paramedic arrival. Use of publicly accessible automated external defibrillators (AED) can improve survival, but "vertical" placement has not been studied. We aim to determine whether elevator-based or lobby-based AED placement results in shorter vertical distance travelled ("response distance") to OHCAs in a high-rise building. Methods: We developed a model of a single-elevator, n-floor high-rise building. We calculated and compared the average distance from AED to floor of arrest for the two AED locations. We modeled OHCA occurrences using floor-specific Poisson processes, the risk of OHCA on the ground floor (λ_1) and the risk on any above-ground floor (λ). The elevator was modeled with an override function enabling direct travel to the target floor. The elevator location upon override was modeled as a discrete uniform random variable. Calculations used the laws of probability. Results: Elevator-based AED placement had shorter average response distance if the number of floors (n) in the building exceeded three quarters of the ratio of ground-floor OHCA risk to above-ground floor risk (λ_1/λ) plus one half (n ≥ 3λ_1/4λ + 0.5). Otherwise, a lobby-based AED had shorter average response distance. If OHCA risk on each floor was equal, an elevator-based AED had shorter average response distance. Conclusions: Elevator-based AEDs travel less vertical distance to OHCAs in tall buildings or those with uniform vertical risk, while lobby-based AEDs travel less vertical distance in buildings with substantial lobby, underground, and nearby street-level traffic and OHCA risk.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Objective: Out-of-hospital cardiac arrests (OHCA) in high-rise buildings experience lower survival and longer delays until paramedic arrival. Use of publicly accessible automated external defibrillators (AED) can improve survival, but "vertical" placement has not been studied. We aim to determine whether elevator-based or lobby-based AED placement results in shorter vertical distance travelled ("response distance") to OHCAs in a high-rise building. Methods: We developed a model of a single-elevator, n-floor high-rise building. We calculated and compared the average distance from AED to floor of arrest for the two AED locations. We modeled OHCA occurrences using floor-specific Poisson processes, the risk of OHCA on the ground floor (λ_1) and the risk on any above-ground floor (λ). The elevator was modeled with an override function enabling direct travel to the target floor. The elevator location upon override was modeled as a discrete uniform random variable. Calculations used the laws of probability. Results: Elevator-based AED placement had shorter average response distance if the number of floors (n) in the building exceeded three quarters of the ratio of ground-floor OHCA risk to above-ground floor risk (λ_1/λ) plus one half (n ≥ 3λ_1/4λ + 0.5). 
Otherwise, a lobby-based AED had shorter average response distance. If OHCA risk on each floor was equal, an elevator-based AED had shorter average response distance. Conclusions: Elevator-based AEDs travel less vertical distance to OHCAs in tall buildings or those with uniform vertical risk, while lobby-based AEDs travel less vertical distance in buildings with substantial lobby, underground, and nearby street-level traffic and OHCA risk. |
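The threshold in the Results, n ≥ 3λ_1/(4λ) + 0.5, can be checked with a quick calculation. The arrest rates below are invented solely to illustrate how the decision rule is applied.

```python
# Worked example of the elevator-vs-lobby decision rule from the abstract:
# place the AED in the elevator when n >= 3*lambda_1/(4*lambda) + 0.5.
# The rates below are hypothetical, chosen only for illustration.
lambda_ground = 4.0   # relative OHCA risk on the ground floor (lambda_1)
lambda_above = 1.0    # relative OHCA risk on each above-ground floor (lambda)

def prefer_elevator(n_floors: int) -> bool:
    threshold = 3 * lambda_ground / (4 * lambda_above) + 0.5
    return n_floors >= threshold

threshold = 3 * lambda_ground / (4 * lambda_above) + 0.5
for n in (2, 3, 4, 10):
    spot = "elevator" if prefer_elevator(n) else "lobby"
    print(f"{n:>2} floors -> threshold {threshold:.1f}, place AED in the {spot}")
```

With these illustrative rates the threshold is 3.5 floors, so a two- or three-storey building favours the lobby and anything taller favours the elevator.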
2016 |
|
31. | A Markov Decision Process-based handicap system for tennis Journal Article T. C. Y. Chan, R. Singal Journal of Quantitative Analysis in Sports, Vol. 12, pp. 179-189, 2016. @article{ChanTCY.J031, title = {A Markov Decision Process-based handicap system for tennis}, author = {T. C. Y. Chan and R. Singal}, doi = {10.1515/jqas-2016-0057}, year = {2016}, date = {2016-04-01}, journal = {Journal of Quantitative Analysis in Sports}, volume = {12}, pages = {179-189}, abstract = {Handicap systems are used in many sports to improve competitive balance and equalize the match-win probability between opponents of differing ability. Recognizing the absence of such a system in tennis, we develop a novel optimization-based handicap system for tennis using a Markov Decision Process (MDP) model. In our handicap system, the weaker player is given β "free points" or "credits" at the start of the match, which he can use before the start of any point during the match to win the point outright. The MDP model determines two key features of the handicap system: (1) Fairness: the minimum value of β required to equalize the match-win probability, and (2) Achievability: the optimal policy governing usage of the β credits to achieve the desired match-win probability. We test the sensitivity of the handicap values to the model's input parameters. Finally, we apply the model to real match data to estimate professional handicaps.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Handicap systems are used in many sports to improve competitive balance and equalize the match-win probability between opponents of differing ability. Recognizing the absence of such a system in tennis, we develop a novel optimization-based handicap system for tennis using a Markov Decision Process (MDP) model. In our handicap system, the weaker player is given β "free points" or "credits" at the start of the match, which he can use before the start of any point during the match to win the point outright. The MDP model determines two key features of the handicap system: (1) Fairness: the minimum value of β required to equalize the match-win probability, and (2) Achievability: the optimal policy governing usage of the β credits to achieve the desired match-win probability. We test the sensitivity of the handicap values to the model's input parameters. Finally, we apply the model to real match data to estimate professional handicaps. |
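The credit mechanism can be illustrated with a deliberately simplified dynamic program. The sketch below scores a single "game" as first to 4 points with no deuce rule and gives the weaker player a handful of credits that each win a point outright; it is a toy version of the same idea, not the paper's match-level MDP, and the point-win probability is invented.

```python
# Toy MDP sketch of the "free point" handicap: the weaker player (point-win
# probability P < 0.5) may spend a credit before any point to win it outright.
# Simplified scoring: first to 4 points wins the game (no deuce), unlike real tennis.
from functools import lru_cache

P = 0.45            # hypothetical probability the weaker player wins a normally played point
TARGET = 4          # points needed to win the simplified game

@lru_cache(maxsize=None)
def win_prob(a: int, b: int, credits: int) -> float:
    """Max probability the weaker player (score a) beats the opponent (score b)."""
    if a == TARGET:
        return 1.0
    if b == TARGET:
        return 0.0
    play = P * win_prob(a + 1, b, credits) + (1 - P) * win_prob(a, b + 1, credits)
    if credits == 0:
        return play
    spend = win_prob(a + 1, b, credits - 1)   # use a credit: point won outright
    return max(play, spend)

for c in range(4):
    print(f"credits = {c}: game-win probability = {win_prob(0, 0, c):.3f}")
```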
30. | Overcoming spatial and temporal barriers to public access defibrillators via optimization Journal Article C. L. F. Sun, D. Demirtas, S. C. Brooks, L. J. Morrison, T. C. Y. Chan Journal of the American College of Cardiology, Vol. 68, pp. 836-845, 2016. @article{ChanTCY.J030, title = {Overcoming spatial and temporal barriers to public access defibrillators via optimization}, author = {C. L. F. Sun and D. Demirtas and S. C. Brooks and L. J. Morrison and T. C. Y. Chan}, doi = {10.1016/j.jacc.2016.03.609}, year = {2016}, date = {2016-03-29}, journal = {Journal of the American College of Cardiology}, volume = {68}, pages = {836-845}, abstract = {BACKGROUND: Immediate access to an automated external defibrillator (AED) increases the chance of survival for out-of-hospital cardiac arrest (OHCA). Current deployment usually considers spatial AED access, assuming AEDs are available 24 h a day. OBJECTIVES: The goal of this study was to develop an optimization model for AED deployment, accounting for spatial and temporal accessibility, to evaluate if OHCA coverage would improve compared with deployment based on spatial accessibility alone. METHODS: This study was a retrospective population-based cohort trial using data from the Toronto Regional RescuNET Epistry cardiac arrest database. We identified all nontraumatic public location OHCAs in Toronto, Ontario, Canada (January 2006 through August 2014) and obtained a list of registered AEDs (March 2015) from Toronto Paramedic Services. Coverage loss due to limited temporal access was quantified by comparing the number of OHCAs that occurred within 100 meters of a registered AED (assumed coverage 24 h per day, 7 days per week) with the number that occurred both within 100 meters of a registered AED and when the AED was available (actual coverage). A spatiotemporal optimization model was then developed that determined AED locations to maximize OHCA actual coverage and overcome the reported coverage loss. The coverage gain between the spatiotemporal model and a spatial-only model was computed by using 10-fold cross-validation. RESULTS: A total of 2,440 nontraumatic public OHCAs and 737 registered AED locations were identified. A total of 451 OHCAs were covered by registered AEDs under assumed coverage 24 h per day, 7 days per week, and 354 OHCAs under actual coverage, representing a coverage loss of 21.5% (p < 0.001). Using the spatiotemporal model to optimize AED deployment, a 25.3% relative increase in actual coverage was achieved compared with the spatial-only approach (p < 0.001). CONCLUSIONS: One in 5 OHCAs occurred near an inaccessible AED at the time of the OHCA. Potential AED use was significantly improved with a spatiotemporal optimization model guiding deployment.}, keywords = {}, pubstate = {published}, tppubtype = {article} } BACKGROUND: Immediate access to an automated external defibrillator (AED) increases the chance of survival for out-of-hospital cardiac arrest (OHCA). Current deployment usually considers spatial AED access, assuming AEDs are available 24 h a day. OBJECTIVES: The goal of this study was to develop an optimization model for AED deployment, accounting for spatial and temporal accessibility, to evaluate if OHCA coverage would improve compared with deployment based on spatial accessibility alone. METHODS: This study was a retrospective population-based cohort trial using data from the Toronto Regional RescuNET Epistry cardiac arrest database. 
We identified all nontraumatic public location OHCAs in Toronto, Ontario, Canada (January 2006 through August 2014) and obtained a list of registered AEDs (March 2015) from Toronto Paramedic Services. Coverage loss due to limited temporal access was quantified by comparing the number of OHCAs that occurred within 100 meters of a registered AED (assumed coverage 24 h per day, 7 days per week) with the number that occurred both within 100 meters of a registered AED and when the AED was available (actual coverage). A spatiotemporal optimization model was then developed that determined AED locations to maximize OHCA actual coverage and overcome the reported coverage loss. The coverage gain between the spatiotemporal model and a spatial-only model was computed by using 10-fold cross-validation. RESULTS: A total of 2,440 nontraumatic public OHCAs and 737 registered AED locations were identified. A total of 451 OHCAs were covered by registered AEDs under assumed coverage 24 h per day, 7 days per week, and 354 OHCAs under actual coverage, representing a coverage loss of 21.5% (p < 0.001). Using the spatiotemporal model to optimize AED deployment, a 25.3% relative increase in actual coverage was achieved compared with the spatial-only approach (p < 0.001). CONCLUSIONS: One in 5 OHCAs occurred near an inaccessible AED at the time of the OHCA. Potential AED use was significantly improved with a spatiotemporal optimization model guiding deployment. |
29. | Sample size requirements for knowledge-based treatment planning Journal Article J. J. Boutilier, T. Craig, M. B. Sharpe, T. C. Y. Chan Medical Physics, Vol. 43, pp. 1212-1221, 2016. @article{ChanTCY.J029, title = {Sample size requirements for knowledge-based treatment planning}, author = {J. J. Boutilier and T. Craig and M. B. Sharpe and T. C. Y. Chan}, doi = {10.1118/1.4941363}, year = {2016}, date = {2016-01-24}, journal = {Medical Physics}, volume = {43}, pages = {1212-1221}, abstract = {Purpose: To determine how training set size affects the accuracy of knowledge-based treatment planning (KBP) models. Methods: The authors selected four models from three classes of KBP approaches, corresponding to three distinct quantities that KBP models may predict: dose–volume histogram (DVH) points, DVH curves, and objective function weights. DVH point prediction is done using the best plan from a database of similar clinical plans; DVH curve prediction employs principal component analysis and multiple linear regression; and objective function weights uses either logistic regression or K-nearest neighbors. The authors trained each KBP model using training sets of sizes n = 10, 20, 30, 50, 75, 100, 150, and 200. The authors set aside 100 randomly selected patients from their cohort of 315 prostate cancer patients from Princess Margaret Cancer Center to serve as a validation set for all experiments. For each value of n, the authors randomly selected 100 different training sets with replacement from the remaining 215 patients. Each of the 100 training sets was used to train a model for each value of n and for each KBT approach. To evaluate the models, the authors predicted the KBP endpoints for each of the 100 patients in the validation set. To estimate the minimum required sample size, the authors used statistical testing to determine if the median error for each sample size from 10 to 150 is equal to the median error for the maximum sample size of 200. Results: The minimum required sample size was different for each model. The DVH point prediction method predicts two dose metrics for the bladder and two for the rectum. The authors found that more than 200 samples were required to achieve consistent model predictions for all four metrics. For DVH curve prediction, the authors found that at least 75 samples were needed to accurately predict the bladder DVH, while only 20 samples were needed to predict the rectum DVH. Finally, for objective function weight prediction, at least 10 samples were needed to train the logistic regression model, while at least 150 samples were required to train the K-nearest neighbor methodology. Conclusions: In conclusion, the minimum required sample size needed to accurately train KBP models for prostate cancer depends on the specific model and endpoint to be predicted. The authors' results may provide a lower bound for more complicated tumor sites.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: To determine how training set size affects the accuracy of knowledge-based treatment planning (KBP) models. Methods: The authors selected four models from three classes of KBP approaches, corresponding to three distinct quantities that KBP models may predict: dose–volume histogram (DVH) points, DVH curves, and objective function weights. 
DVH point prediction is done using the best plan from a database of similar clinical plans; DVH curve prediction employs principal component analysis and multiple linear regression; and objective function weight prediction uses either logistic regression or K-nearest neighbors. The authors trained each KBP model using training sets of sizes n = 10, 20, 30, 50, 75, 100, 150, and 200. The authors set aside 100 randomly selected patients from their cohort of 315 prostate cancer patients from Princess Margaret Cancer Center to serve as a validation set for all experiments. For each value of n, the authors randomly selected 100 different training sets with replacement from the remaining 215 patients. Each of the 100 training sets was used to train a model for each value of n and for each KBP approach. To evaluate the models, the authors predicted the KBP endpoints for each of the 100 patients in the validation set. To estimate the minimum required sample size, the authors used statistical testing to determine whether the median error for each sample size from 10 to 150 was equal to the median error for the maximum sample size of 200. Results: The minimum required sample size was different for each model. The DVH point prediction method predicts two dose metrics for the bladder and two for the rectum. The authors found that more than 200 samples were required to achieve consistent model predictions for all four metrics. For DVH curve prediction, the authors found that at least 75 samples were needed to accurately predict the bladder DVH, while only 20 samples were needed to predict the rectum DVH. Finally, for objective function weight prediction, at least 10 samples were needed to train the logistic regression model, while at least 150 samples were required to train the K-nearest neighbor methodology. Conclusions: The minimum required sample size needed to accurately train KBP models for prostate cancer depends on the specific model and endpoint to be predicted. The authors' results may provide a lower bound for more complicated tumor sites. |
28. | Optimizing the deployment of public access defibrillators Journal Article T. C. Y. Chan, D. Demirtas, R. H. Kwon Management Science, Vol. 62, pp. 3617-3635, 2016. @article{ChanTCY.J028, title = {Optimizing the deployment of public access defibrillators}, author = {T. C. Y. Chan and D. Demirtas and R. H. Kwon}, doi = {10.1287/mnsc.2015.2312}, year = {2016}, date = {2016-01-02}, journal = {Management Science}, volume = {62}, pages = {3617-3635}, abstract = {Out-of-hospital cardiac arrest is a significant public health issue, and treatment, namely, cardiopulmonary resuscitation and defibrillation, is very time sensitive. Public access defibrillation programs, which deploy automated external defibrillators (AEDs) for bystander use in an emergency, reduce the time to defibrillation and improve survival rates. In this paper, we develop models to guide the deployment of public AEDs. Our models generalize existing location models and incorporate differences in bystander behavior. We formulate three mixed integer nonlinear models and derive equivalent integer linear reformulations or easily computable bounds. We use kernel density estimation to derive a spatial probability distribution of cardiac arrests that is used for optimization and model evaluation. Using data from Toronto, Canada, we show that optimizing AED deployment outperforms the existing approach by 40% in coverage, and substantial gains can be achieved through relocating existing AEDs. Our results suggest that improvements in survival and cost-effectiveness are possible with optimization.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Out-of-hospital cardiac arrest is a significant public health issue, and treatment, namely, cardiopulmonary resuscitation and defibrillation, is very time sensitive. Public access defibrillation programs, which deploy automated external defibrillators (AEDs) for bystander use in an emergency, reduce the time to defibrillation and improve survival rates. In this paper, we develop models to guide the deployment of public AEDs. Our models generalize existing location models and incorporate differences in bystander behavior. We formulate three mixed integer nonlinear models and derive equivalent integer linear reformulations or easily computable bounds. We use kernel density estimation to derive a spatial probability distribution of cardiac arrests that is used for optimization and model evaluation. Using data from Toronto, Canada, we show that optimizing AED deployment outperforms the existing approach by 40% in coverage, and substantial gains can be achieved through relocating existing AEDs. Our results suggest that improvements in survival and cost-effectiveness are possible with optimization. |
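A bare-bones version of the kind of coverage model generalized in this paper is the classical maximal covering location problem: pick a budgeted number of AED sites so that as much (weighted) historical arrest demand as possible lies within a coverage radius. The demand weights, candidate sites, and coverage matrix below are synthetic, and the paper's actual models add bystander-behavior terms beyond this sketch.

```python
# Minimal maximal-covering sketch (synthetic data), in the spirit of AED siting models.
import pulp

n_sites, n_arrests, budget = 6, 10, 2
# covers[j][i] = 1 if candidate site j is within coverage radius of arrest i (made up).
covers = [[1,1,0,0,0,0,0,0,1,0],
          [0,1,1,1,0,0,0,0,0,0],
          [0,0,0,1,1,1,0,0,0,0],
          [0,0,0,0,0,1,1,0,0,0],
          [0,0,0,0,0,0,1,1,1,0],
          [0,0,0,0,0,0,0,0,1,1]]
weight = [1.0] * n_arrests   # could be kernel-density risk weights instead

m = pulp.LpProblem("aed_coverage", pulp.LpMaximize)
y = pulp.LpVariable.dicts("open", range(n_sites), cat=pulp.LpBinary)
z = pulp.LpVariable.dicts("covered", range(n_arrests), cat=pulp.LpBinary)

m += pulp.lpSum(weight[i] * z[i] for i in range(n_arrests))               # maximize covered demand
m += pulp.lpSum(y[j] for j in range(n_sites)) <= budget                   # AED budget
for i in range(n_arrests):
    m += z[i] <= pulp.lpSum(covers[j][i] * y[j] for j in range(n_sites))  # coverage linking

m.solve(pulp.PULP_CBC_CMD(msg=False))
print("sites opened:", [j for j in range(n_sites) if y[j].value() == 1])
print("arrests covered:", sum(int(z[i].value()) for i in range(n_arrests)))
```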
27. | Constraint generation methods for robust optimization in radiation therapy Journal Article H. Mahmoudzadeh, T. G. Purdie, T. C. Y. Chan Operations Research for Health Care, Vol. 8, pp. 85-90, 2016. @article{ChanTCY.J027, title = {Constraint generation methods for robust optimization in radiation therapy}, author = {H. Mahmoudzadeh and T. G. Purdie and T. C. Y. Chan}, doi = {10.1016/j.orhc.2015.03.003}, year = {2016}, date = {2016-01-01}, journal = {Operations Research for Health Care}, volume = {8}, pages = {85-90}, abstract = {We develop a constraint generation solution method for robust optimization problems in radiation therapy in which the problems include a large number of robust constraints. Each robust constraint must hold for any realization of an uncertain parameter within a given uncertainty set. Because the problems are large scale, the robust counterpart is computationally challenging to solve. To address this challenge, we explore different strategies of adding constraints in a constraint generation solution approach. We motivate and demonstrate our approach using robust intensity-modulated radiation therapy treatment planning for breast cancer. We use clinical data to compare the computational efficiency of our constraint generation strategies with that of directly solving the robust counterpart.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We develop a constraint generation solution method for robust optimization problems in radiation therapy in which the problems include a large number of robust constraints. Each robust constraint must hold for any realization of an uncertain parameter within a given uncertainty set. Because the problems are large scale, the robust counterpart is computationally challenging to solve. To address this challenge, we explore different strategies of adding constraints in a constraint generation solution approach. We motivate and demonstrate our approach using robust intensity-modulated radiation therapy treatment planning for breast cancer. We use clinical data to compare the computational efficiency of our constraint generation strategies with that of directly solving the robust counterpart. |
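To convey the flavor of constraint generation for robust constraints (a generic sketch with a toy robust LP and box uncertainty, not the breast-cancer treatment planning model), the loop below alternates between solving a relaxed LP and adding the most violated realization of the uncertain constraint.

```python
# Generic constraint-generation sketch for a robust constraint  a'x <= b  for all a in a box.
# Toy problem: maximize c'x subject to the robust constraint and bounds; synthetic data.
import numpy as np
from scipy.optimize import linprog

c = np.array([3.0, 2.0])              # maximize c'x (linprog minimizes, so negate below)
a_nom = np.array([1.0, 1.0])          # nominal constraint coefficients
dev = np.array([0.3, 0.1])            # half-widths: a in [a_nom - dev, a_nom + dev]
b = 4.0
bounds = [(0, 10), (0, 10)]

A_ub, b_ub = [], []
for it in range(20):
    res = linprog(-c, A_ub=np.array(A_ub) if A_ub else None,
                  b_ub=np.array(b_ub) if b_ub else None, bounds=bounds, method="highs")
    x = res.x
    # Worst-case a maximizes a'x over the box; for x >= 0 this is a_nom + dev
    # (the sign trick leaves zero components at their nominal value, which changes nothing).
    a_worst = a_nom + dev * np.sign(x)
    violation = a_worst @ x - b
    if violation <= 1e-8:
        print(f"converged after {it} cuts: x = {x}, objective = {c @ x:.3f}")
        break
    A_ub.append(a_worst)              # add the violated realization as a new constraint
    b_ub.append(b)
```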
2015 |
|
26. | The value of nodal information in predicting lung cancer relapse using 4DPET/4DCT Journal Article H. Li, N. Becker, S. Raman, T. C. Y. Chan, J.-P. Bissonnette Medical Physics, Vol. 42, pp. 4727-4733, 2015. @article{ChanTCY.J026, title = {The value of nodal information in predicting lung cancer relapse using 4DPET/4DCT}, author = {H. Li and N. Becker and S. Raman and T. C. Y. Chan and J.-P. Bissonnette}, doi = {10.1118/1.4926755}, year = {2015}, date = {2015-07-01}, journal = {Medical Physics}, volume = {42}, pages = {4727-4733}, abstract = {Purpose: There is evidence that computed tomography (CT) and positron emission tomography (PET) imaging metrics are prognostic and predictive in nonsmall cell lung cancer (NSCLC) treatment outcomes. However, few studies have explored the use of standardized uptake value (SUV)-based image features of nodal regions as predictive features. The authors investigated and compared the use of tumor and node image features extracted from the radiotherapy target volumes to predict relapse in a cohort of NSCLC patients undergoing chemoradiation treatment. Methods: A prospective cohort of 25 patients with locally advanced NSCLC underwent 4DPET/4DCT imaging for radiation planning. Thirty-seven image features were derived from the CT-defined volumes and SUVs of the PET image from both the tumor and nodal target regions. The machine learning methods of logistic regression and repeated stratified five-fold cross-validation (CV) were used to predict local and overall relapses in 2 yr. The authors used well-known feature selection methods (Spearman's rank correlation, recursive feature elimination) within each fold of CV. Classifiers were ranked on their Matthew's correlation coefficient (MCC) after CV. Area under the curve, sensitivity, and specificity values are also presented. Results: For predicting local relapse, the best classifier found had a mean MCC of 0.07 and was composed of eight tumor features. For predicting overall relapse, the best classifier found had a mean MCC of 0.29 and was composed of a single feature: the volume greater than 0.5 times the maximum SUV (N). Conclusions: The best classifier for predicting local relapse had only tumor features. In contrast, the best classifier for predicting overall relapse included a node feature. Overall, the methods showed that nodes add value in predicting overall relapse but not local relapse.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: There is evidence that computed tomography (CT) and positron emission tomography (PET) imaging metrics are prognostic and predictive in nonsmall cell lung cancer (NSCLC) treatment outcomes. However, few studies have explored the use of standardized uptake value (SUV)-based image features of nodal regions as predictive features. The authors investigated and compared the use of tumor and node image features extracted from the radiotherapy target volumes to predict relapse in a cohort of NSCLC patients undergoing chemoradiation treatment. Methods: A prospective cohort of 25 patients with locally advanced NSCLC underwent 4DPET/4DCT imaging for radiation planning. Thirty-seven image features were derived from the CT-defined volumes and SUVs of the PET image from both the tumor and nodal target regions. The machine learning methods of logistic regression and repeated stratified five-fold cross-validation (CV) were used to predict local and overall relapses in 2 yr. 
The authors used well-known feature selection methods (Spearman's rank correlation, recursive feature elimination) within each fold of CV. Classifiers were ranked on their Matthew's correlation coefficient (MCC) after CV. Area under the curve, sensitivity, and specificity values are also presented. Results: For predicting local relapse, the best classifier found had a mean MCC of 0.07 and was composed of eight tumor features. For predicting overall relapse, the best classifier found had a mean MCC of 0.29 and was composed of a single feature: the volume greater than 0.5 times the maximum SUV (N). Conclusions: The best classifier for predicting local relapse had only tumor features. In contrast, the best classifier for predicting overall relapse included a node feature. Overall, the methods showed that nodes add value in predicting overall relapse but not local relapse. |
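The evaluation pipeline described in the abstract maps naturally onto standard tooling. The snippet below is an assumed, minimal reconstruction using scikit-learn, with synthetic data standing in for the 4DPET/4DCT image features; only the feature count, cohort size, and fold structure mirror the abstract.

```python
# Assumed, minimal reconstruction of the evaluation pipeline described above:
# recursive feature elimination wrapped around logistic regression, scored with
# the Matthews correlation coefficient under repeated stratified 5-fold CV.
# Synthetic data stand in for the 37 image features of the 25-patient cohort.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.pipeline import Pipeline

X, y = make_classification(n_samples=25, n_features=37, n_informative=5, random_state=0)

pipe = Pipeline([
    ("select", RFE(LogisticRegression(max_iter=1000), n_features_to_select=8)),
    ("clf", LogisticRegression(max_iter=1000)),
])
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=0)
scores = cross_val_score(pipe, X, y, scoring="matthews_corrcoef", cv=cv)
print(f"mean MCC across folds: {scores.mean():.2f}")
```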
25. | Robust PET-guided intensity-modulated radiation therapy Journal Article H. Li, J. P. Bissonnette, T. Purdie, T. C. Y. Chan Medical Physics, Vol. 42, pp. 4863-4871, 2015. @article{ChanTCY.J025, title = {Robust PET-guided intensity-modulated radiation therapy}, author = {H. Li and J. P. Bissonnette and T. Purdie and T. C. Y. Chan}, doi = {10.1118/1.4926845}, year = {2015}, date = {2015-06-23}, journal = {Medical Physics}, volume = {42}, pages = {4863-4871}, abstract = {Purpose: Functional image guided intensity-modulated radiation therapy has the potential to improve cancer treatment quality by basing treatment parameters such as heterogeneous dose distributions on information derived from imaging. However, such heterogeneous dose distributions are subject to imaging uncertainty. In this paper, the authors develop a robust optimization model to design plans that are desensitized to imaging uncertainty. Methods: Starting from the pretreatment fluorodeoxyglucose-positron emission tomography scans, the authors use the raw voxel standard uptake values (SUVs) as input into a series of intermediate functions to transform the SUV into a desired dose. The calculated desired doses were used as an input into a robust optimization model to generate beamlet intensities. For each voxel, the authors assume that the true SUV cannot be observed but instead resides in an interval centered on the nominal (i.e., observed) SUV. Then the authors evaluated the nominal and robust solutions through a simulation study. The simulation considered the effect of the true SUV being different from the nominal SUV on the quality of the treatment plan. Treatment plans were compared on the metrics of objective function value and tumor control probability (TCP).}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: Functional image guided intensity-modulated radiation therapy has the potential to improve cancer treatment quality by basing treatment parameters such as heterogeneous dose distributions on information derived from imaging. However, such heterogeneous dose distributions are subject to imaging uncertainty. In this paper, the authors develop a robust optimization model to design plans that are desensitized to imaging uncertainty. Methods: Starting from the pretreatment fluorodeoxyglucose-positron emission tomography scans, the authors use the raw voxel standard uptake values (SUVs) as input into a series of intermediate functions to transform the SUV into a desired dose. The calculated desired doses were used as an input into a robust optimization model to generate beamlet intensities. For each voxel, the authors assume that the true SUV cannot be observed but instead resides in an interval centered on the nominal (i.e., observed) SUV. Then the authors evaluated the nominal and robust solutions through a simulation study. The simulation considered the effect of the true SUV being different from the nominal SUV on the quality of the treatment plan. Treatment plans were compared on the metrics of objective function value and tumor control probability (TCP). 
Results: Computational results demonstrate the potential for improvements in tumor control probability and deviation from the desired dose distribution compared to a nonrobust model while maintaining acceptable tissue dose. Conclusions: Robust optimization can help design treatment plans that are more stable in the presence of image value uncertainties. |
24. | The perils of adapting to dose errors in radiation therapy Journal Article V. V. Mišić, T. C. Y. Chan PLOS ONE, Vol. 10(Article No. e0125335), 2015. @article{ChanTCY.J024, title = {The perils of adapting to dose errors in radiation therapy}, author = {V. V. Mišić and T. C. Y. Chan}, doi = {10.1371/journal.pone.0125335}, year = {2015}, date = {2015-03-14}, journal = {PLOS ONE}, volume = {10}, number = {Article No. e0125335}, abstract = {We consider adaptive robust methods for lung cancer that are also dose-reactive, wherein the treatment is modified after each treatment session to account for the dose delivered in prior treatment sessions. Such methods are of interest because they potentially allow for errors in the delivered dose to be corrected as the treatment progresses, thereby ensuring that the tumor receives a sufficient dose at the end of the treatment. We show through a computational study with real lung cancer patient data that while dose reaction is beneficial with respect to the final dose distribution, it may lead to exaggerated daily underdose and overdose relative to non-reactive methods that grows as the treatment progresses. However, by combining dose reaction with a mechanism for updating an estimate of the uncertainty, the magnitude of this growth can be mitigated substantially. The key finding of this paper is that reacting to dose errors – an adaptation strategy that is both simple and intuitively appealing – may backfire and lead to treatments that are clinically unacceptable.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We consider adaptive robust methods for lung cancer that are also dose-reactive, wherein the treatment is modified after each treatment session to account for the dose delivered in prior treatment sessions. Such methods are of interest because they potentially allow for errors in the delivered dose to be corrected as the treatment progresses, thereby ensuring that the tumor receives a sufficient dose at the end of the treatment. We show through a computational study with real lung cancer patient data that while dose reaction is beneficial with respect to the final dose distribution, it may lead to exaggerated daily underdose and overdose relative to non-reactive methods that grows as the treatment progresses. However, by combining dose reaction with a mechanism for updating an estimate of the uncertainty, the magnitude of this growth can be mitigated substantially. The key finding of this paper is that reacting to dose errors – an adaptation strategy that is both simple and intuitively appealing – may backfire and lead to treatments that are clinically unacceptable. |
23. | Adaptive and robust radiation therapy in the presence of drift Journal Article P. A. Mar, T. C. Y. Chan Physics in Medicine and Biology, Vol. 60, pp. 3599-3615, 2015. @article{ChanTCY.J023, title = {Adaptive and robust radiation therapy in the presence of drift}, author = {P. A. Mar and T. C. Y. Chan}, doi = {10.1088/0031-9155/60/9/3599}, year = {2015}, date = {2015-03-11}, journal = {Physics in Medicine and Biology}, volume = {60}, pages = {3599-3615}, abstract = {Combining adaptive and robust optimization in radiation therapy has the potential to mitigate the negative effects of both intrafraction and interfraction uncertainty over a fractionated treatment course. A previously developed adaptive and robust radiation therapy (ARRT) method for lung cancer was demonstrated to be effective when the sequence of breathing patterns was well-behaved. In this paper, we examine the applicability of the ARRT method to less well-behaved breathing patterns. We develop a novel method to generate sequences of probability mass functions that represent different types of drift in the underlying breathing pattern. Computational results derived from applying the ARRT method to these sequences demonstrate that the ARRT method is effective for a much broader class of breathing patterns than previously demonstrated.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Combining adaptive and robust optimization in radiation therapy has the potential to mitigate the negative effects of both intrafraction and interfraction uncertainty over a fractionated treatment course. A previously developed adaptive and robust radiation therapy (ARRT) method for lung cancer was demonstrated to be effective when the sequence of breathing patterns was well-behaved. In this paper, we examine the applicability of the ARRT method to less well-behaved breathing patterns. We develop a novel method to generate sequences of probability mass functions that represent different types of drift in the underlying breathing pattern. Computational results derived from applying the ARRT method to these sequences demonstrate that the ARRT method is effective for a much broader class of breathing patterns than previously demonstrated. |
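A toy version of the drifting breathing patterns studied above can be generated in a few lines. The sketch below (assumed, not the paper's generator) shifts probability mass between breathing phases by a fixed per-fraction drift and renormalizes, which is one simple way to produce a sequence of probability mass functions that is not well-behaved.

```python
# Toy sketch (assumed, not the paper's generator): a sequence of breathing-phase
# probability mass functions that drifts over a 30-fraction course, produced by
# shifting mass toward the last phase each fraction and renormalizing.
import numpy as np

pmf = np.array([0.5, 0.3, 0.2])        # initial distribution over 3 breathing phases
drift = np.array([-0.01, 0.0, 0.01])   # assumed per-fraction drift toward late phases

sequence = []
for fraction in range(30):
    sequence.append(pmf.copy())
    pmf = np.clip(pmf + drift, 0.0, 1.0)
    pmf /= pmf.sum()                   # keep it a valid probability mass function

print("fraction 1 :", np.round(sequence[0], 3))
print("fraction 30:", np.round(sequence[-1], 3))
```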
22. | Robust optimization methods for cardiac sparing in tangential breast IMRT Journal Article H. Mahmoudzadeh, J. Lee, T. C. Y. Chan, T. G. Purdie Medical Physics, Vol. 42, pp. 2212-2222, 2015. @article{ChanTCY.J022, title = {Robust optimization methods for cardiac sparing in tangential breast IMRT}, author = {H. Mahmoudzadeh and J. Lee and T. C. Y. Chan and T. G. Purdie}, doi = {10.1118/1.4916092}, year = {2015}, date = {2015-01-02}, journal = {Medical Physics}, volume = {42}, pages = {2212-2222}, abstract = {Purpose: In left-sided tangential breast intensity modulated radiation therapy (IMRT), the heart may enter the radiation field and receive excessive radiation while the patient is breathing. The patient's breathing pattern is often irregular and unpredictable. We verify the clinical applicability of a heart-sparing robust optimization approach for breast IMRT. We compare robust optimized plans with clinical plans at free-breathing and clinical plans at deep inspiration breath-hold (DIBH) using active breathing control (ABC). Methods: Eight patients were included in the study with each patient simulated using 4D-CT. The 4D-CT image acquisition generated ten breathing phase datasets. An average scan was constructed using all the phase datasets. Two of the eight patients were also imaged at breath-hold using ABC. The 4D-CT datasets were used to calculate the accumulated dose for robust optimized and clinical plans based on deformable registration. We generated a set of simulated breathing probability mass functions, which represent the fraction of time patients spend in different breathing phases. The robust optimization method was applied to each patient using a set of dose-influence matrices extracted from the 4D-CT data and a model of the breathing motion uncertainty. The goal of the optimization models was to minimize the dose to the heart while ensuring dose constraints on the target were achieved under breathing motion uncertainty. Results: Robust optimized plans were improved or equivalent to the clinical plans in terms of heart sparing for all patients studied. The robust method reduced the accumulated heart dose (D10cc) by up to 801 cGy compared to the clinical method while also improving the coverage of the accumulated whole breast target volume. On average, the robust method reduced the heart dose (D10cc) by 364 cGy and improved the optBreast dose (D99%) by 477 cGy. In addition, the robust method had smaller deviations from the planned dose to the accumulated dose. The deviation of the accumulated dose from the planned dose for the optBreast (D99%) was 12 cGy for robust versus 445 cGy for clinical. The deviation for the heart (D10cc) was 41 cGy for robust and 320 cGy for clinical. Conclusions: The robust optimization approach can reduce heart dose compared to the clinical method at free-breathing and can potentially reduce the need for breath-hold techniques.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: In left-sided tangential breast intensity modulated radiation therapy (IMRT), the heart may enter the radiation field and receive excessive radiation while the patient is breathing. The patient's breathing pattern is often irregular and unpredictable. We verify the clinical applicability of a heart-sparing robust optimization approach for breast IMRT. We compare robust optimized plans with clinical plans at free-breathing and clinical plans at deep inspiration breath-hold (DIBH) using active breathing control (ABC). 
Methods: Eight patients were included in the study with each patient simulated using 4D-CT. The 4D-CT image acquisition generated ten breathing phase datasets. An average scan was constructed using all the phase datasets. Two of the eight patients were also imaged at breath-hold using ABC. The 4D-CT datasets were used to calculate the accumulated dose for robust optimized and clinical plans based on deformable registration. We generated a set of simulated breathing probability mass functions, which represent the fraction of time patients spend in different breathing phases. The robust optimization method was applied to each patient using a set of dose-influence matrices extracted from the 4D-CT data and a model of the breathing motion uncertainty. The goal of the optimization models was to minimize the dose to the heart while ensuring dose constraints on the target were achieved under breathing motion uncertainty. Results: Robust optimized plans were improved or equivalent to the clinical plans in terms of heart sparing for all patients studied. The robust method reduced the accumulated heart dose (D10cc) by up to 801 cGy compared to the clinical method while also improving the coverage of the accumulated whole breast target volume. On average, the robust method reduced the heart dose (D10cc) by 364 cGy and improved the optBreast dose (D99%) by 477 cGy. In addition, the robust method had smaller deviations from the planned dose to the accumulated dose. The deviation of the accumulated dose from the planned dose for the optBreast (D99%) was 12 cGy for robust versus 445 cGy for clinical. The deviation for the heart (D10cc) was 41 cGy for robust and 320 cGy for clinical. Conclusions: The robust optimization approach can reduce heart dose compared to the clinical method at free-breathing and can potentially reduce the need for breath-hold techniques. |
21. | Models for predicting objective function weights in prostate cancer IMRT Journal Article J. J. Boutilier, T. Lee, T. Craig, M. B. Sharpe, T. C. Y. Chan Medical Physics, Vol. 42, pp. 1586-1595, 2015. @article{ChanTCY.J021, title = {Models for predicting objective function weights in prostate cancer IMRT}, author = {J. J. Boutilier and T. Lee and T. Craig and M. B. Sharpe and T. C. Y. Chan}, doi = {10.1118/1.4914140}, year = {2015}, date = {2015-01-01}, journal = {Medical Physics}, volume = {42}, pages = {1586-1595}, abstract = {Purpose: To develop and evaluate the clinical applicability of advanced machine learning models that simultaneously predict multiple optimization objective function weights from patient geometry for intensity-modulated radiation therapy of prostate cancer. Methods: A previously developed inverse optimization method was applied retrospectively to determine optimal objective function weights for 315 treated patients. The authors used an overlap volume ratio (OV) of bladder and rectum for different PTV expansions and overlap volume histogram slopes (OVSR and OVSB for the rectum and bladder, respectively) as explanatory variables that quantify patient geometry. Using the optimal weights as ground truth, the authors trained and applied three prediction models: logistic regression (LR), multinomial logistic regression (MLR), and weighted K-nearest neighbor (KNN). The population average of the optimal objective function weights was also calculated. Results: The OV at 0.4 cm and OVSR at 0.1 cm features were found to be the most predictive of the weights. The authors observed comparable performance (i.e., no statistically significant difference) between LR, MLR, and KNN methodologies, with LR appearing to perform the best. All three machine learning models outperformed the population average by a statistically significant amount over a range of clinical metrics including bladder/rectum V53Gy, bladder/rectum V70Gy, and dose to the bladder, rectum, CTV, and PTV. When comparing the weights directly, the LR model predicted bladder and rectum weights that had, on average, a 73% and 74% relative improvement over the population average weights, respectively. The treatment plans resulting from the LR weights had, on average, a rectum V70Gy that was 35% closer to the clinical plan and a bladder V70Gy that was 29% closer, compared to the population average weights. Similar results were observed for all other clinical metrics. Conclusions: The authors demonstrated that the KNN and MLR weight prediction methodologies perform comparably to the LR model and can produce clinical quality treatment plans by simultaneously predicting multiple weights that capture trade-offs associated with sparing multiple OARs.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: To develop and evaluate the clinical applicability of advanced machine learning models that simultaneously predict multiple optimization objective function weights from patient geometry for intensity-modulated radiation therapy of prostate cancer. Methods: A previously developed inverse optimization method was applied retrospectively to determine optimal objective function weights for 315 treated patients. The authors used an overlap volume ratio (OV) of bladder and rectum for different PTV expansions and overlap volume histogram slopes (OVSR and OVSB for the rectum and bladder, respectively) as explanatory variables that quantify patient geometry. 
Using the optimal weights as ground truth, the authors trained and applied three prediction models: logistic regression (LR), multinomial logistic regression (MLR), and weighted K-nearest neighbor (KNN). The population average of the optimal objective function weights was also calculated. Results: The OV at 0.4 cm and OVSR at 0.1 cm features were found to be the most predictive of the weights. The authors observed comparable performance (i.e., no statistically significant difference) between LR, MLR, and KNN methodologies, with LR appearing to perform the best. All three machine learning models outperformed the population average by a statistically significant amount over a range of clinical metrics including bladder/rectum V53Gy, bladder/rectum V70Gy, and dose to the bladder, rectum, CTV, and PTV. When comparing the weights directly, the LR model predicted bladder and rectum weights that had, on average, a 73% and 74% relative improvement over the population average weights, respectively. The treatment plans resulting from the LR weights had, on average, a rectum V70Gy that was 35% closer to the clinical plan and a bladder V70Gy that was 29% closer, compared to the population average weights. Similar results were observed for all other clinical metrics. Conclusions: The authors demonstrated that the KNN and MLR weight prediction methodologies perform comparably to the LR model and can produce clinical quality treatment plans by simultaneously predicting multiple weights that capture trade-offs associated with sparing multiple OARs. |
2014 |
|
20. | A robust-CVaR optimization approach with application to breast cancer therapy Journal Article T. C. Y. Chan, H. Mahmoudzadeh, T. G. Purdie European Journal of Operational Research, Vol. 238, pp. 876-885, 2014. @article{ChanTCY.J020, title = {A robust-CVaR optimization approach with application to breast cancer therapy}, author = {T. C. Y. Chan and H. Mahmoudzadeh and T. G. Purdie}, doi = {10.1016/j.ejor.2014.04.038}, year = {2014}, date = {2014-04-26}, journal = {European Journal of Operational Research}, volume = {238}, pages = {876-885}, abstract = {We present a framework to optimize the conditional value-at-risk (CVaR) of a loss distribution under uncertainty. Our model assumes that the loss distribution is dependent on the state of some system and the fraction of time spent in each state is uncertain. We develop and compare two robust-CVaR formulations that take into account this type of uncertainty. We motivate and demonstrate our approach using radiation therapy treatment planning of breast cancer, where the uncertainty is in the patient's breathing motion and the states of the system are the phases of the patient's breathing cycle. We use a CVaR representation of the tails of the dose distribution to the points in the body and account for uncertainty in the patient's breathing pattern that affects the overall dose distribution.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We present a framework to optimize the conditional value-at-risk (CVaR) of a loss distribution under uncertainty. Our model assumes that the loss distribution is dependent on the state of some system and the fraction of time spent in each state is uncertain. We develop and compare two robust-CVaR formulations that take into account this type of uncertainty. We motivate and demonstrate our approach using radiation therapy treatment planning of breast cancer, where the uncertainty is in the patient's breathing motion and the states of the system are the phases of the patient's breathing cycle. We use a CVaR representation of the tails of the dose distribution to the points in the body and account for uncertainty in the patient's breathing pattern that affects the overall dose distribution. |
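For readers unfamiliar with CVaR, the quantity being robustified above has a convenient sample form due to Rockafellar and Uryasev; the short example below (illustrative values only, not from the paper) evaluates it by Monte Carlo.

```python
# Worked example (illustrative values, not from the paper) of the CVaR quantity
# the robust formulations optimize, using the Rockafellar-Uryasev sample form
#   CVaR_alpha(L) = t + E[(L - t)_+] / (1 - alpha)   evaluated at t = VaR_alpha.
import numpy as np

rng = np.random.default_rng(0)
losses = rng.normal(60.0, 5.0, size=100_000)   # e.g. a sampled voxel-dose "loss" in Gy
alpha = 0.95

var = np.quantile(losses, alpha)                                   # value-at-risk (tail threshold)
cvar = var + np.mean(np.maximum(losses - var, 0.0)) / (1 - alpha)  # mean of the worst 5% tail
print(f"VaR_95 = {var:.2f} Gy, CVaR_95 = {cvar:.2f} Gy")
```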
19. | Estimating oxygen needs for childhood pneumonia in developing country health systems: a new model for expecting the unexpected Journal Article B. D. Bradley, S. R. C. Howie, T. C. Y. Chan, Y.-L. Cheng PLOS ONE, Vol. 9(Article No. e89872), 2014. @article{ChanTCY.J019, title = {Estimating oxygen needs for childhood pneumonia in developing country health systems: a new model for expecting the unexpected}, author = {B. D. Bradley and S. R. C. Howie and T. C. Y. Chan and Y.-L. Cheng}, doi = {10.1371/journal.pone.0089872}, year = {2014}, date = {2014-01-25}, journal = {PLOS ONE}, volume = {9}, number = {Article No. e89872}, abstract = {Background: Planning for the reliable and cost-effective supply of a health service commodity such as medical oxygen requires an understanding of the dynamic need or 'demand' for the commodity over time. In developing country health systems, however, collecting longitudinal clinical data for forecasting purposes is very difficult. Furthermore, approaches to estimating demand for supplies based on annual averages can underestimate demand some of the time by missing temporal variability. Methods: A discrete event simulation model was developed to estimate variable demand for a health service commodity using the important example of medical oxygen for childhood pneumonia. The model is based on five key factors affecting oxygen demand: annual pneumonia admission rate, hypoxaemia prevalence, degree of seasonality, treatment duration, and oxygen flow rate. These parameters were varied over a wide range of values to generate simulation results for different settings. Total oxygen volume, peak patient load, and hours spent above average-based demand estimates were computed for both low and high seasons. Findings: Oxygen demand estimates based on annual average values of demand factors can often severely underestimate actual demand. For scenarios with high hypoxaemia prevalence and degree of seasonality, demand can exceed average levels up to 68% of the time. Even for typical scenarios, demand may exceed three times the average level for several hours per day. Peak patient load is sensitive to hypoxaemia prevalence, whereas time spent at such peak loads is strongly influenced by degree of seasonality. Conclusion: A theoretical study is presented whereby a simulation approach to estimating oxygen demand is used to better capture temporal variability compared to standard average-based approaches. This approach provides better grounds for health service planning, including decision-making around technologies for oxygen delivery. Beyond oxygen, this approach is widely applicable to other areas of resource and technology planning in developing country health systems.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Planning for the reliable and cost-effective supply of a health service commodity such as medical oxygen requires an understanding of the dynamic need or 'demand' for the commodity over time. In developing country health systems, however, collecting longitudinal clinical data for forecasting purposes is very difficult. Furthermore, approaches to estimating demand for supplies based on annual averages can underestimate demand some of the time by missing temporal variability. Methods: A discrete event simulation model was developed to estimate variable demand for a health service commodity using the important example of medical oxygen for childhood pneumonia. 
The model is based on five key factors affecting oxygen demand: annual pneumonia admission rate, hypoxaemia prevalence, degree of seasonality, treatment duration, and oxygen flow rate. These parameters were varied over a wide range of values to generate simulation results for different settings. Total oxygen volume, peak patient load, and hours spent above average-based demand estimates were computed for both low and high seasons. Findings: Oxygen demand estimates based on annual average values of demand factors can often severely underestimate actual demand. For scenarios with high hypoxaemia prevalence and degree of seasonality, demand can exceed average levels up to 68% of the time. Even for typical scenarios, demand may exceed three times the average level for several hours per day. Peak patient load is sensitive to hypoxaemia prevalence, whereas time spent at such peak loads is strongly influenced by degree of seasonality. Conclusion: A theoretical study is presented whereby a simulation approach to estimating oxygen demand is used to better capture temporal variability compared to standard average-based approaches. This approach provides better grounds for health service planning, including decision-making around technologies for oxygen delivery. Beyond oxygen, this approach is widely applicable to other areas of resource and technology planning in developing country health systems. |
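A stripped-down, day-stepped version of the demand simulation (an assumption for illustration; the paper uses a discrete event model built on the five factors listed above) already shows how seasonality and hypoxaemia prevalence push demand above annual-average estimates.

```python
# Rough sketch (assumed; a day-stepped simulation rather than the paper's full
# discrete event model) of how the five demand factors translate into oxygen
# flow demand that can far exceed annual-average estimates. Values are illustrative.
import numpy as np

rng = np.random.default_rng(1)
days = 365
annual_admissions = 500     # pneumonia admissions per year
hypox_prev = 0.15           # fraction of admissions needing oxygen
seasonality = 0.6           # amplitude of the seasonal admission cycle
stay_days = 4               # treatment duration on oxygen
flow_lpm = 1.0              # flow rate per patient (L/min)

t = np.arange(days)
daily_rate = annual_admissions / days * (1 + seasonality * np.sin(2 * np.pi * t / days))
admissions = rng.poisson(daily_rate)

on_oxygen = np.zeros(days)
for day, n in enumerate(admissions):
    need_o2 = rng.binomial(n, hypox_prev)
    on_oxygen[day:day + stay_days] += need_o2     # each patient stays on oxygen for stay_days

demand = on_oxygen * flow_lpm
avg = demand.mean()
print(f"average demand {avg:.1f} L/min, peak {demand.max():.1f} L/min, "
      f"{(demand > avg).mean():.0%} of days above the average-based estimate")
```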
18. | Generalized inverse multi-objective optimization with application to cancer therapy Journal Article T. C. Y. Chan, T. Craig, T. Lee, M. B. Sharpe Operations Research, Vol. 62, pp. 680-695, 2014. @article{ChanTCY.J018, title = {Generalized inverse multi-objective optimization with application to cancer therapy}, author = {T. C. Y. Chan and T. Craig and T. Lee and M. B. Sharpe}, doi = {10.1287/opre.2014.1267}, year = {2014}, date = {2014-01-03}, journal = {Operations Research}, volume = {62}, pages = {680-695}, abstract = {We generalize the standard method of solving inverse optimization problems to allow for the solution of inverse problems that would otherwise be ill posed or infeasible. In multiobjective linear optimization, given a solution that is not a weakly efficient solution to the forward problem, our method generates objective function weights that make the given solution a near-weakly efficient solution. Our generalized inverse optimization model specializes to the standard model when the given solution is weakly efficient and retains the complexity of the underlying forward problem. We provide a novel interpretation of our inverse formulation as the dual of the well-known Benson's method and by doing so develop a new connection between inverse optimization and Pareto surface approximation techniques. We apply our method to prostate cancer data obtained from Princess Margaret Cancer Centre in Toronto, Canada. We demonstrate that clinically acceptable treatments can be generated using a small number of objective functions and inversely optimized weights—current treatments are designed using a complex formulation with a large parameter space in a trial-and-error reoptimization process. We also show that our method can identify objective functions that are most influential in treatment plan optimization.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We generalize the standard method of solving inverse optimization problems to allow for the solution of inverse problems that would otherwise be ill posed or infeasible. In multiobjective linear optimization, given a solution that is not a weakly efficient solution to the forward problem, our method generates objective function weights that make the given solution a near-weakly efficient solution. Our generalized inverse optimization model specializes to the standard model when the given solution is weakly efficient and retains the complexity of the underlying forward problem. We provide a novel interpretation of our inverse formulation as the dual of the well-known Benson's method and by doing so develop a new connection between inverse optimization and Pareto surface approximation techniques. We apply our method to prostate cancer data obtained from Princess Margaret Cancer Centre in Toronto, Canada. We demonstrate that clinically acceptable treatments can be generated using a small number of objective functions and inversely optimized weights—current treatments are designed using a complex formulation with a large parameter space in a trial-and-error reoptimization process. We also show that our method can identify objective functions that are most influential in treatment plan optimization. |
17. | A stochastic model for tumor geometry evolution during radiation therapy in cervical cancer Journal Article Y. Liu, T. C. Y. Chan, C.-G. Lee, Y. B. Cho, M. K. Islam Medical Physics, Vol. 41(Article No. 021705), 2014. @article{ChanTCY.J017, title = {A stochastic model for tumor geometry evolution during radiation therapy in cervical cancer}, author = {Y. Liu and T. C. Y. Chan and C.-G. Lee and Y. B. Cho and M. K. Islam}, doi = {10.1118/1.4859355}, year = {2014}, date = {2014-01-02}, journal = {Medical Physics}, volume = {41}, number = {Article No. 021705}, abstract = {Purpose: To develop mathematical models to predict the evolution of tumor geometry in cervical cancer undergoing radiation therapy. Methods: The authors develop two mathematical models to estimate tumor geometry change: a Markov model and an isomorphic shrinkage model. The Markov model describes tumor evolution by investigating the change in state (either tumor or nontumor) of voxels on the tumor surface. It assumes that the evolution follows a Markov process. Transition probabilities are obtained using maximum likelihood estimation and depend on the states of neighboring voxels. The isomorphic shrinkage model describes tumor shrinkage or growth in terms of layers of voxels on the tumor surface, instead of modeling individual voxels. The two proposed models were applied to data from 29 cervical cancer patients treated at Princess Margaret Cancer Centre and then compared to a constant volume approach. Model performance was measured using sensitivity and specificity. Results: The Markov model outperformed both the isomorphic shrinkage and constant volume models in terms of the trade-off between sensitivity (target coverage) and specificity (normal tissue sparing). Generally, the Markov model achieved a few percentage points in improvement in either sensitivity or specificity compared to the other models. The isomorphic shrinkage model was comparable to the Markov approach under certain parameter settings. Convex tumor shapes were easier to predict. Conclusions: By modeling tumor geometry change at the voxel level using a probabilistic model, improvements in target coverage and normal tissue sparing are possible. Our Markov model is flexible and has tunable parameters to adjust model performance to meet a range of criteria. Such a model may support the development of an adaptive paradigm for radiation therapy of cervical cancer.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: To develop mathematical models to predict the evolution of tumor geometry in cervical cancer undergoing radiation therapy. Methods: The authors develop two mathematical models to estimate tumor geometry change: a Markov model and an isomorphic shrinkage model. The Markov model describes tumor evolution by investigating the change in state (either tumor or nontumor) of voxels on the tumor surface. It assumes that the evolution follows a Markov process. Transition probabilities are obtained using maximum likelihood estimation and depend on the states of neighboring voxels. The isomorphic shrinkage model describes tumor shrinkage or growth in terms of layers of voxels on the tumor surface, instead of modeling individual voxels. The two proposed models were applied to data from 29 cervical cancer patients treated at Princess Margaret Cancer Centre and then compared to a constant volume approach. Model performance was measured using sensitivity and specificity. 
Results: The Markov model outperformed both the isomorphic shrinkage and constant volume models in terms of the trade-off between sensitivity (target coverage) and specificity (normal tissue sparing). Generally, the Markov model achieved a few percentage points in improvement in either sensitivity or specificity compared to the other models. The isomorphic shrinkage model was comparable to the Markov approach under certain parameter settings. Convex tumor shapes were easier to predict. Conclusions: By modeling tumor geometry change at the voxel level using a probabilistic model, improvements in target coverage and normal tissue sparing are possible. Our Markov model is flexible and has tunable parameters to adjust model performance to meet a range of criteria. Such a model may support the development of an adaptive paradigm for radiation therapy of cervical cancer. |
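The Markov model's parameters are simple conditional frequencies. The toy sketch below (assumed; a 2D grid standing in for the study's 3D tumor volumes) shows the maximum likelihood counting step that estimates the probability a tumor voxel persists, conditioned on its number of tumor neighbours.

```python
# Toy sketch (assumed; a 2D grid standing in for the study's 3D tumor volumes)
# of the maximum likelihood counting step behind the Markov model: estimate the
# probability that a tumor voxel persists, conditioned on its tumor-neighbour count.
import numpy as np

rng = np.random.default_rng(2)
grid_t = rng.random((50, 50)) < 0.4                      # tumor mask at fraction t

# number of tumor neighbours (4-neighbourhood) for each voxel at time t
shifts = [np.roll(grid_t, s, axis=ax).astype(int) for ax in (0, 1) for s in (1, -1)]
neigh = sum(shifts)

# synthetic "ground truth" dynamics: persistence grows with the neighbour count
stay_prob = np.clip(0.55 + 0.1 * neigh, 0.0, 1.0)
grid_t1 = grid_t & (rng.random((50, 50)) < stay_prob)    # tumor mask at fraction t+1

# MLE: P(still tumor at t+1 | tumor at t with k tumor neighbours) = observed fraction
for k in range(5):
    mask = grid_t & (neigh == k)
    if mask.sum():
        print(f"k={k}: estimated P(stay tumor) = {grid_t1[mask].mean():.2f}  (n={mask.sum()})")
```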
16. | A new mathematical programming approach to optimize wind farm layouts Journal Article S. D. O. Turner, D. A. Romero, P. Y. Zhang, C. H. Amon, T. C. Y. Chan Renewable Energy, Vol. 63, pp. 674-680, 2014. @article{ChanTCY.J016, title = {A new mathematical programming approach to optimize wind farm layouts}, author = {S. D. O. Turner and D. A. Romero and P. Y. Zhang and C. H. Amon and T. C. Y. Chan}, doi = {10.1016/j.renene.2013.10.023}, year = {2014}, date = {2014-01-01}, journal = {Renewable Energy}, volume = {63}, pages = {674-680}, abstract = {The optimal placement of turbines in a wind farm is critical to the maximization of power production. In this paper, we develop a new mathematical programming approach for wind farm layout optimization. We use Jensen's wake decay model to represent multi-turbine wake effects. We develop mixed integer linear and quadratic optimization formulations and apply them to several example layout cases in the literature. Compared to previous approaches, our models produce layouts that tend to be more symmetric and that generate slightly more power. Our formulations solve quickly, allowing a decision maker to efficiently explore the impact of different turbine densities in a wind farm.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The optimal placement of turbines in a wind farm is critical to the maximization of power production. In this paper, we develop a new mathematical programming approach for wind farm layout optimization. We use Jensen's wake decay model to represent multi-turbine wake effects. We develop mixed integer linear and quadratic optimization formulations and apply them to several example layout cases in the literature. Compared to previous approaches, our models produce layouts that tend to be more symmetric and that generate slightly more power. Our formulations solve quickly, allowing a decision maker to efficiently explore the impact of different turbine densities in a wind farm. |
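The Jensen wake decay model referenced above has a compact closed form: the fractional velocity deficit at downstream distance x is (1 - sqrt(1 - C_T)) (r_0 / (r_0 + kx))^2 inside the expanding wake cone, with multiple wakes combined by a root sum of squares. The sketch below uses assumed turbine parameters and an assumed layout purely for illustration.

```python
# Illustrative sketch (assumed turbine parameters and layout) of the Jensen wake
# decay model cited above: fractional deficit (1 - sqrt(1 - Ct)) * (r0 / (r0 + k*x))^2
# inside the expanding wake cone, with multiple wakes combined by root sum of squares.
import numpy as np

u0 = 12.0    # free-stream wind speed (m/s)
ct = 0.88    # thrust coefficient
k = 0.075    # wake decay constant (typical onshore value)
r0 = 40.0    # rotor radius (m)

def deficit(dx, dy):
    """Fractional velocity deficit at dx metres downstream, dy metres crosswind."""
    if dx <= 0:
        return 0.0
    wake_radius = r0 + k * dx
    if abs(dy) > wake_radius:
        return 0.0                                   # outside the wake cone
    return (1 - np.sqrt(1 - ct)) * (r0 / wake_radius) ** 2

upstream = [(0.0, 0.0)]                              # single turbine at the origin
for x, y in [(280.0, 0.0), (560.0, 0.0), (560.0, 120.0)]:
    d = np.sqrt(sum(deficit(x - ux, y - uy) ** 2 for ux, uy in upstream))
    print(f"turbine at ({x:.0f}, {y:.0f}) m sees {u0 * (1 - d):.2f} m/s")
```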
2013 |
|
15. | Predicting objective function weights from patient anatomy in prostate IMRT treatment planning Journal Article T. Lee, M. Hummad, T. C. Y. Chan, T. Craig, M. B. Sharpe Medical Physics, Vol. 40(Article No. 121706), 2013. @article{ChanTCY.J015, title = {Predicting objective function weights from patient anatomy in prostate IMRT treatment planning}, author = {T. Lee and M. Hummad and T. C. Y. Chan and T. Craig and M. B. Sharpe}, doi = {10.1118/1.4828841}, year = {2013}, date = {2013-10-16}, journal = {Medical Physics}, volume = {40}, number = {Article No. 121706}, abstract = {Purpose: Intensity-modulated radiation therapy (IMRT) treatment planning typically combines multiple criteria into a single objective function by taking a weighted sum. The authors propose a statistical model that predicts objective function weights from patient anatomy for prostate IMRT treatment planning. This study provides a proof of concept for geometry-driven weight determination. Methods: A previously developed inverse optimization method (IOM) was used to generate optimal objective function weights for 24 patients using their historical treatment plans (i.e., dose distributions). These IOM weights were around 1% for each of the femoral heads, while bladder and rectum weights varied greatly between patients. A regression model was developed to predict a patient's rectum weight using the ratio of the overlap volume of the rectum and bladder with the planning target volume at a 1 cm expansion as the independent variable. The femoral head weights were fixed to 1% each and the bladder weight was calculated as one minus the rectum and femoral head weights. The model was validated using leave-one-out cross validation. Objective values and dose distributions generated through inverse planning using the predicted weights were compared to those generated using the original IOM weights, as well as an average of the IOM weights across all patients. Results: The IOM weight vectors were on average six times closer to the predicted weight vectors than to the average weight vector, using l_2 distance. Likewise, the bladder and rectum objective values achieved by the predicted weights were more similar to the objective values achieved by the IOM weights. The difference in objective value performance between the predicted and average weights was statistically significant according to a one-sided sign test. For all patients, the difference in rectum V54.3 Gy, rectum V70.0 Gy, bladder V54.3 Gy, and bladder V70.0 Gy values between the dose distributions generated by the predicted weights and IOM weights was less than 5 percentage points. Similarly, the difference in femoral head V54.3 Gy values between the two dose distributions was less than 5 percentage points for all but one patient. Conclusions: This study demonstrates a proof of concept that patient anatomy can be used to predict appropriate objective function weights for treatment planning. In the long term, such geometry-driven weights may serve as a starting point for iterative treatment plan design or may provide information about the most clinically relevant region of the Pareto surface to explore.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose: Intensity-modulated radiation therapy (IMRT) treatment planning typically combines multiple criteria into a single objective function by taking a weighted sum. The authors propose a statistical model that predicts objective function weights from patient anatomy for prostate IMRT treatment planning. 
This study provides a proof of concept for geometry-driven weight determination. Methods: A previously developed inverse optimization method (IOM) was used to generate optimal objective function weights for 24 patients using their historical treatment plans (i.e., dose distributions). These IOM weights were around 1% for each of the femoral heads, while bladder and rectum weights varied greatly between patients. A regression model was developed to predict a patient's rectum weight using the ratio of the overlap volume of the rectum and bladder with the planning target volume at a 1 cm expansion as the independent variable. The femoral head weights were fixed to 1% each and the bladder weight was calculated as one minus the rectum and femoral head weights. The model was validated using leave-one-out cross validation. Objective values and dose distributions generated through inverse planning using the predicted weights were compared to those generated using the original IOM weights, as well as an average of the IOM weights across all patients. Results: The IOM weight vectors were on average six times closer to the predicted weight vectors than to the average weight vector, using l_2 distance. Likewise, the bladder and rectum objective values achieved by the predicted weights were more similar to the objective values achieved by the IOM weights. The difference in objective value performance between the predicted and average weights was statistically significant according to a one-sided sign test. For all patients, the difference in rectum V54.3 Gy, rectum V70.0 Gy, bladder V54.3 Gy, and bladder V70.0 Gy values between the dose distributions generated by the predicted weights and IOM weights was less than 5 percentage points. Similarly, the difference in femoral head V54.3 Gy values between the two dose distributions was less than 5 percentage points for all but one patient. Conclusions: This study demonstrates a proof of concept that patient anatomy can be used to predict appropriate objective function weights for treatment planning. In the long term, such geometry-driven weights may serve as a starting point for iterative treatment plan design or may provide information about the most clinically relevant region of the Pareto surface to explore. |
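A bare-bones version of the weight-prediction idea is easy to prototype. The snippet below (synthetic data and assumed coefficients, not the study's code) regresses the rectum weight on a single overlap feature, validates with leave-one-out cross-validation, and assigns the leftover weight mass to the bladder after fixing the two femoral heads at 1% each.

```python
# Bare-bones sketch (synthetic data and assumed coefficients, not the study's
# code): regress the rectum weight on one overlap-volume feature, validate with
# leave-one-out CV, and give the remaining weight mass to the bladder after
# fixing the two femoral heads at 1% each.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import LeaveOneOut, cross_val_predict

rng = np.random.default_rng(5)
overlap_ratio = rng.uniform(0.2, 2.0, size=(24, 1))                 # geometry feature per patient
rectum_weight = 0.25 + 0.2 * overlap_ratio[:, 0] + rng.normal(0, 0.03, 24)

pred = cross_val_predict(LinearRegression(), overlap_ratio, rectum_weight, cv=LeaveOneOut())
print("mean absolute LOO error:", np.abs(pred - rectum_weight).mean().round(3))

bladder_weight = 1.0 - pred - 0.02                                  # weights sum to one
print("patient 1 predicted weights -> rectum:", pred[0].round(2), "bladder:", bladder_weight[0].round(2))
```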
14. | Examining the LEED rating system using inverse optimization Journal Article S. D. O. Turner, T. C. Y. Chan Journal of Solar Energy Engineering, Vol. 135(Article No. 040901), 2013. @article{ChanTCY.J014, title = {Examining the LEED rating system using inverse optimization}, author = {S. D. O. Turner and T. C. Y. Chan}, doi = {10.1115/1.4025221}, year = {2013}, date = {2013-08-02}, journal = {Journal of Solar Energy Engineering}, volume = {135}, number = {Article No. 040901}, abstract = {The Leadership in Energy and Environmental Design (LEED) rating system is the most recognized green building certification program in North America. In order to be LEED certified, a building must earn a sufficient number of points, which are obtained through achieving certain credits or design elements. In LEED versions 1 and 2, each credit was worth one point. In version 3, the LEED system changed so that certain credits were worth more than one point. In this study, we develop an inverse optimization approach to examine how building designers intrinsically valued design elements in LEED version 2. Because of the change in the point system between version 2 and version 3, we aim to determine whether building designers actually valued each credit equally, and if not, whether their valuations matched the values in version 3. Due to the large dimensionality of the inverse optimization problem, we develop an approximation to improve tractability. We apply our method to 306 different LEED-certified buildings in the continental United States. We find that building designers did not value all credits equally and that other factors such as cost, building type, and size, and certification level play a role in how the credits are valued. Overall, inverse optimization may provide a new method to assess historical data and support the design of future versions of LEED.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The Leadership in Energy and Environmental Design (LEED) rating system is the most recognized green building certification program in North America. In order to be LEED certified, a building must earn a sufficient number of points, which are obtained through achieving certain credits or design elements. In LEED versions 1 and 2, each credit was worth one point. In version 3, the LEED system changed so that certain credits were worth more than one point. In this study, we develop an inverse optimization approach to examine how building designers intrinsically valued design elements in LEED version 2. Because of the change in the point system between version 2 and version 3, we aim to determine whether building designers actually valued each credit equally, and if not, whether their valuations matched the values in version 3. Due to the large dimensionality of the inverse optimization problem, we develop an approximation to improve tractability. We apply our method to 306 different LEED-certified buildings in the continental United States. We find that building designers did not value all credits equally and that other factors such as cost, building type, and size, and certification level play a role in how the credits are valued. Overall, inverse optimization may provide a new method to assess historical data and support the design of future versions of LEED. |
13. | Deal or No Deal: A spreadsheet game to introduce decision making under uncertainty Journal Article T. C. Y. Chan INFORMS Transactions on Education, Vol. 14, pp. 53-60, 2013. @article{ChanTCY.J013, title = {Deal or No Deal: A spreadsheet game to introduce decision making under uncertainty}, author = {T. C. Y. Chan}, doi = {10.1287/ited.2013.0104}, year = {2013}, date = {2013-07-01}, journal = {INFORMS Transactions on Education}, volume = {14}, pages = {53-60}, abstract = {In this paper, I introduce a spreadsheet-based implementation of the game show Deal or No Deal. I describe how this game can be used in class to illuminate topics in decision making under uncertainty to students in both engineering and business. I show that specific scenarios encountered in the game can lead to rich discussions on topics like risk, utility, and probability. The game is easy to learn and play in class and usually receives a strong positive response from students.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this paper, I introduce a spreadsheet-based implementation of the game show Deal or No Deal. I describe how this game can be used in class to illuminate topics in decision making under uncertainty to students in both engineering and business. I show that specific scenarios encountered in the game can lead to rich discussions on topics like risk, utility, and probability. The game is easy to learn and play in class and usually receives a strong positive response from students. |
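The core classroom calculation behind the game is a one-liner: compare the banker's offer to the expected value of the unopened cases. The numbers below are made up for illustration.

```python
# Quick worked example (made-up numbers) of the expected-value versus bank-offer
# comparison the game is used to teach in class.
remaining = [0.01, 100, 10_000, 500_000]   # dollar amounts still in play
bank_offer = 95_000

expected_value = sum(remaining) / len(remaining)   # each case is equally likely
print(f"expected value of continuing: ${expected_value:,.2f}")
print("deal" if bank_offer >= expected_value else "no deal",
      "(for a risk-neutral player; a sufficiently risk-averse utility may still take the deal)")
```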
12. | Adaptive and robust radiation therapy optimization for lung cancer Journal Article T. C. Y. Chan, V. V. Mišić European Journal of Operational Research, Vol. 231, pp. 745-756, 2013. @article{ChanTCY.J012, title = {Adaptive and robust radiation therapy optimization for lung cancer}, author = {T. C. Y. Chan and V. V. Mišić}, doi = {10.1016/j.ejor.2013.06.003}, year = {2013}, date = {2013-06-03}, journal = {European Journal of Operational Research}, volume = {231}, pages = {745-756}, abstract = {A previous approach to robust intensity-modulated radiation therapy (IMRT) treatment planning for moving tumors in the lung involves solving a single planning problem before the start of treatment and using the resulting solution in all of the subsequent treatment sessions. In this paper, we develop an adaptive robust optimization approach to IMRT treatment planning for lung cancer, where information gathered in prior treatment sessions is used to update the uncertainty set and guide the reoptimization of the treatment for the next session. Such an approach allows for the estimate of the uncertain effect to improve as the treatment goes on and represents a generalization of existing robust optimization and adaptive radiation therapy methodologies. Our method is computationally tractable, as it involves solving a sequence of linear optimization problems. We present computational results for a lung cancer patient case and show that using our adaptive robust method, it is possible to attain an improvement over the traditional robust approach in both tumor coverage and organ sparing simultaneously. We also prove that under certain conditions our adaptive robust method is asymptotically optimal, which provides insight into the performance observed in our computational study. The essence of our method – solving a sequence of single-stage robust optimization problems, with the uncertainty set updated each time – can potentially be applied to other problems that involve multi-stage decisions to be made under uncertainty.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A previous approach to robust intensity-modulated radiation therapy (IMRT) treatment planning for moving tumors in the lung involves solving a single planning problem before the start of treatment and using the resulting solution in all of the subsequent treatment sessions. In this paper, we develop an adaptive robust optimization approach to IMRT treatment planning for lung cancer, where information gathered in prior treatment sessions is used to update the uncertainty set and guide the reoptimization of the treatment for the next session. Such an approach allows for the estimate of the uncertain effect to improve as the treatment goes on and represents a generalization of existing robust optimization and adaptive radiation therapy methodologies. Our method is computationally tractable, as it involves solving a sequence of linear optimization problems. We present computational results for a lung cancer patient case and show that using our adaptive robust method, it is possible to attain an improvement over the traditional robust approach in both tumor coverage and organ sparing simultaneously. We also prove that under certain conditions our adaptive robust method is asymptotically optimal, which provides insight into the performance observed in our computational study. 
The essence of our method – solving a sequence of single-stage robust optimization problems, with the uncertainty set updated each time – can potentially be applied to other problems that involve multi-stage decisions to be made under uncertainty. |
11. | Modeling the impact of public access defibrillator range on public location cardiac arrest coverage Journal Article A. A. Siddiq, S. C. Brooks, T. C. Y. Chan Resuscitation, Vol. 84, pp. 904-909, 2013. @article{ChanTCY.J011, title = {Modeling the impact of public access defibrillator range on public location cardiac arrest coverage}, author = {A. A. Siddiq and S. C. Brooks and T. C. Y. Chan}, doi = {10.1016/j.resuscitation.2012.11.019}, year = {2013}, date = {2013-04-01}, journal = {Resuscitation}, volume = {84}, pages = {904-909}, abstract = {Background: Public access defibrillation with automated external defibrillators (AEDs) can improve survival from out-of-hospital cardiac arrests (OHCA) occurring in public. Increasing the effective range of AEDs may improve coverage for public location OHCAs. Objective: To quantify the relationship between AED effective range and public location cardiac arrest coverage. Methods: This was a retrospective cohort study using the Resuscitation Outcomes Consortium Epistry database. We included all public-location, atraumatic, EMS-attended OHCAs in Toronto, Canada between December 16, 2005 and July 15, 2010. We ran a mathematical model for AED placement that maximizes coverage of historical public OHCAs given pre-specified values of AED effective range and the number of locations to place AEDs. Locations of all non-residential buildings were obtained from the City of Toronto and used as candidate sites for AED placement. Coverage was evaluated for range values from 10 to 300 m and number of AED locations from 10 to 200, both in increments of 10, for a total of 600 unique scenarios. Coverage from placing AEDs in all public buildings was also measured. Results: There were 1310 public location OHCAs during the study period, with 25,851 non-residential buildings identified as candidate sites for AED placement. Cardiac arrest coverage increased with AED effective range, with improvements in coverage diminishing at higher ranges. For example, for a deployment of 200 AED locations, increasing effective range from 100 m to 200 m covered an additional 15% of cardiac arrests, whereas increasing range further from 200 m to 300 m covered an additional 10%. Placing an AED in each of the 25,851 public buildings resulted in coverage of 50% and 95% under assumed effective ranges of 50 m and 300 m, respectively. Conclusion: Increasing AED effective range can improve cardiac arrest coverage. Mathematical models can help evaluate the potential impact of initiatives which increase AED range.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background: Public access defibrillation with automated external defibrillators (AEDs) can improve survival from out-of-hospital cardiac arrests (OHCA) occurring in public. Increasing the effective range of AEDs may improve coverage for public location OHCAs. Objective: To quantify the relationship between AED effective range and public location cardiac arrest coverage. Methods: This was a retrospective cohort study using the Resuscitation Outcomes Consortium Epistry database. We included all public-location, atraumatic, EMS-attended OHCAs in Toronto, Canada between December 16, 2005 and July 15, 2010. We ran a mathematical model for AED placement that maximizes coverage of historical public OHCAs given pre-specified values of AED effective range and the number of locations to place AEDs. Locations of all non-residential buildings were obtained from the City of Toronto and used as candidate sites for AED placement. 
Coverage was evaluated for range values from 10 to 300 m and number of AED locations from 10 to 200, both in increments of 10, for a total of 600 unique scenarios. Coverage from placing AEDs in all public buildings was also measured. Results: There were 1310 public location OHCAs during the study period, with 25,851 non-residential buildings identified as candidate sites for AED placement. Cardiac arrest coverage increased with AED effective range, with improvements in coverage diminishing at higher ranges. For example, for a deployment of 200 AED locations, increasing effective range from 100 m to 200 m covered an additional 15% of cardiac arrests, whereas increasing range further from 200 m to 300 m covered an additional 10%. Placing an AED in each of the 25,851 public buildings resulted in coverage of 50% and 95% under assumed effective ranges of 50 m and 300 m, respectively. Conclusion: Increasing AED effective range can improve cardiac arrest coverage. Mathematical models can help evaluate the potential impact of initiatives which increase AED range. |
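The coverage calculation underlying this study reduces to a nearest-distance test against the assumed effective range. The following minimal sketch (not the study's code; the planar coordinates and Euclidean distances are illustrative assumptions) shows how coverage could be evaluated across a few assumed ranges.

```python
# Illustrative sketch (not the paper's implementation): evaluate cardiac arrest
# "coverage" for a given set of AED sites under an assumed effective range.
# Coordinates and the Euclidean-distance assumption are hypothetical.
import numpy as np

def coverage_fraction(arrests, aed_sites, effective_range_m):
    """Fraction of arrest locations within effective_range_m of any AED site."""
    arrests = np.asarray(arrests, dtype=float)      # shape (n, 2), metres
    aed_sites = np.asarray(aed_sites, dtype=float)  # shape (m, 2), metres
    # Pairwise distances between every arrest and every AED site.
    d = np.linalg.norm(arrests[:, None, :] - aed_sites[None, :, :], axis=2)
    return float((d.min(axis=1) <= effective_range_m).mean())

# Example: sweep the assumed effective range, as in the 10-300 m evaluation.
rng = np.random.default_rng(0)
arrests = rng.uniform(0, 5000, size=(1310, 2))   # hypothetical arrest locations
sites = rng.uniform(0, 5000, size=(200, 2))      # hypothetical AED placements
for r in (50, 100, 200, 300):
    print(r, round(coverage_fraction(arrests, sites, r), 3))
```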
10. | Identifying locations for public access defibrillators using mathematical optimization Journal Article T. C. Y. Chan, H. Li, G. Lebovic, S. K. Tang, J. Y. T. Chan, H. C. K. Cheng, L. J. Morrison, S. C. Brooks Circulation, Vol. 127, pp. 1801-1809, 2013. @article{ChanTCY.J010, title = {Identifying locations for public access defibrillators using mathematical optimization}, author = {T. C. Y. Chan and H. Li and G. Lebovic and S. K. Tang and J. Y. T. Chan and H. C. K. Cheng and L. J. Morrison and S. C. Brooks}, doi = {10.1161/CIRCULATIONAHA.113.001953}, year = {2013}, date = {2013-03-21}, journal = {Circulation}, volume = {127}, pages = {1801-1809}, abstract = {Background—Geospatial methods using mathematical optimization to identify clusters of cardiac arrests and prioritize public locations for defibrillator deployment have not been studied. Our objective was to develop such a method and test its performance against a population-guided approach. Methods and Results—All public location cardiac arrests in Toronto, Ontario, Canada, from December 16, 2005, to July 15, 2010, and all automated external defibrillator (AED) locations registered with Toronto Emergency Medical Services as of September 2009 were plotted geographically. Current AED coverage was quantified by determining the number of cardiac arrests occurring within 100 m of a registered AED. Clusters of cardiac arrests without a registered AED within 100 m were identified. With the use of mathematical optimization techniques, cardiac arrest coverage improvements were computed and shown to be superior to results from a population-guided deployment method. There were 1310 eligible public location cardiac arrests and 1669 registered AEDs. Of the eligible cardiac arrests, 304 were within 100 m of at least 1 registered AED (23% coverage). The average distance from a cardiac arrest to the closest AED was 281 m. With AEDs deployed in the top 30 locations, an additional 112 historical cardiac arrests would be covered (32% total coverage), and the average distance to the closest AED would be 262 m. Conclusions—Geographic clusters of cardiac arrests can be easily identified and prioritized with the use of mathematical modeling. Optimized AED deployment can increase cardiac arrest coverage and decrease the distance to the closest AED. Mathematical modeling can augment public AED deployment programs.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Background—Geospatial methods using mathematical optimization to identify clusters of cardiac arrests and prioritize public locations for defibrillator deployment have not been studied. Our objective was to develop such a method and test its performance against a population-guided approach. Methods and Results—All public location cardiac arrests in Toronto, Ontario, Canada, from December 16, 2005, to July 15, 2010, and all automated external defibrillator (AED) locations registered with Toronto Emergency Medical Services as of September 2009 were plotted geographically. Current AED coverage was quantified by determining the number of cardiac arrests occurring within 100 m of a registered AED. Clusters of cardiac arrests without a registered AED within 100 m were identified. With the use of mathematical optimization techniques, cardiac arrest coverage improvements were computed and shown to be superior to results from a population-guided deployment method. There were 1310 eligible public location cardiac arrests and 1669 registered AEDs. 
Of the eligible cardiac arrests, 304 were within 100 m of at least 1 registered AED (23% coverage). The average distance from a cardiac arrest to the closest AED was 281 m. With AEDs deployed in the top 30 locations, an additional 112 historical cardiac arrests would be covered (32% total coverage), and the average distance to the closest AED would be 262 m. Conclusions—Geographic clusters of cardiac arrests can be easily identified and prioritized with the use of mathematical modeling. Optimized AED deployment can increase cardiac arrest coverage and decrease the distance to the closest AED. Mathematical modeling can augment public AED deployment programs. |
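The placement optimization described in this abstract selects a limited number of candidate sites to cover as many historical arrests as possible. A maximal-covering-style formulation in that spirit can be sketched with the open-source PuLP library; the tiny instance below is hypothetical and is not the authors' model or the Toronto data.

```python
# Illustrative maximal-covering-style formulation (not the authors' model or data):
# choose at most p candidate sites to maximize the number of covered arrests.
import pulp

def place_aeds(cover_sets, num_sites, p):
    """cover_sets[i] = set of candidate sites within range of arrest i."""
    prob = pulp.LpProblem("aed_placement", pulp.LpMaximize)
    x = pulp.LpVariable.dicts("open", range(num_sites), cat="Binary")
    y = pulp.LpVariable.dicts("covered", range(len(cover_sets)), cat="Binary")
    prob += pulp.lpSum(y.values())                       # maximize covered arrests
    for i, sites in enumerate(cover_sets):
        prob += y[i] <= pulp.lpSum(x[j] for j in sites)  # covered only if a nearby site opens
    prob += pulp.lpSum(x.values()) <= p                  # budget of p AED locations
    prob.solve(pulp.PULP_CBC_CMD(msg=False))
    return [j for j in range(num_sites) if x[j].value() == 1]

# Tiny hypothetical instance: 4 arrests, 3 candidate sites, place 1 AED.
print(place_aeds([{0, 1}, {1}, {1, 2}, {2}], num_sites=3, p=1))  # -> [1]
```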
9. | Determining risk for out-of-hospital cardiac arrest by location type in a Canadian urban setting to guide future public access defibrillator placement Journal Article S. C. Brooks, J. H. Hsu, S. K. Tang, R. Jeyakumar, T. C. Y. Chan Annals of Emergency Medicine, Vol. 61, pp. 530-538, 2013. @article{ChanTCY.J009, title = {Determining risk for out-of-hospital cardiac arrest by location type in a Canadian urban setting to guide future public access defibrillator placement}, author = {S. C. Brooks and J. H. Hsu and S. K. Tang and R. Jeyakumar and T. C. Y. Chan}, doi = {10.1016/j.annemergmed.2012.10.037}, year = {2013}, date = {2013-01-02}, journal = {Annals of Emergency Medicine}, volume = {61}, pages = {530-538}, abstract = {Study objective: Automated external defibrillator use by lay bystanders during out-of-hospital cardiac arrest rarely occurs but can improve survival. We seek to estimate risk for out-of-hospital cardiac arrest by location type and evaluate current automated external defibrillator deployment in a Canadian urban setting to guide future automated external defibrillator deployment. Methods: This was a retrospective analysis of a population-based out-of-hospital cardiac arrest database. We included consecutive public location, nontraumatic, out-of-hospital cardiac arrests occurring in Toronto from January 1, 2006, to June 30, 2010, captured in the Resuscitation Outcomes Consortium Epistry database. Two investigators independently categorized each out-of-hospital cardiac arrest and automated external defibrillator location into one of 38 categories. Total site counts in each location category were used to estimate average annual per-site cardiac arrest incidence and determine the relative automated external defibrillator coverage for each location type. Results: There were 608 eligible out-of-hospital cardiac arrest cases. The top 5 location categories by average annual out-of-hospital cardiac arrests per site were race track/casino (0.67; 95% confidence interval [CI] 0 to 1.63), jail (0.62; 95% CI 0.3 to 1.06), hotel/motel (0.15; 95% CI 0.12 to 0.18), hostel/shelter (0.14; 95% CI 0.067 to 0.19), and convention center (0.11; 95% CI 0 to 0.43). Although schools were relatively lower risk for cardiac arrest, they represented 72.5% of automated external defibrillator–covered locations in the study region. Some higher-risk location types such as hotel/motel, hostel/shelter, and rail station were severely underrepresented with respect to automated external defibrillator coverage. Conclusion: We have identified types of locations with higher per-site risk for cardiac arrest relative to others. We have also identified potential mismatches between cardiac arrest risk by location type and registered automated external defibrillator distribution in a Canadian urban setting.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Study objective: Automated external defibrillator use by lay bystanders during out-of-hospital cardiac arrest rarely occurs but can improve survival. We seek to estimate risk for out-of-hospital cardiac arrest by location type and evaluate current automated external defibrillator deployment in a Canadian urban setting to guide future automated external defibrillator deployment. Methods: This was a retrospective analysis of a population-based out-of-hospital cardiac arrest database. We included consecutive public location, nontraumatic, out-of-hospital cardiac arrests occurring in Toronto from January 1, 2006, to June 30, 2010, captured in the Resuscitation Outcomes Consortium Epistry database. 
Two investigators independently categorized each out-of-hospital cardiac arrest and automated external defibrillator location into one of 38 categories. Total site counts in each location category were used to estimate average annual per-site cardiac arrest incidence and determine the relative automated external defibrillator coverage for each location type. Results: There were 608 eligible out-of-hospital cardiac arrest cases. The top 5 location categories by average annual out-of-hospital cardiac arrests per site were race track/casino (0.67; 95% confidence interval [CI] 0 to 1.63), jail (0.62; 95% CI 0.3 to 1.06), hotel/motel (0.15; 95% CI 0.12 to 0.18), hostel/shelter (0.14; 95% CI 0.067 to 0.19), and convention center (0.11; 95% CI 0 to 0.43). Although schools were relatively lower risk for cardiac arrest, they represented 72.5% of automated external defibrillator–covered locations in the study region. Some higher-risk location types such as hotel/motel, hostel/shelter, and rail station were severely underrepresented with respect to automated external defibrillator coverage. Conclusion: We have identified types of locations with higher per-site risk for cardiac arrest relative to others. We have also identified potential mismatches between cardiac arrest risk by location type and registered automated external defibrillator distribution in a Canadian urban setting. |
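The headline per-site figures amount to simple rate arithmetic; a hedged sketch with hypothetical counts is shown below (the study window of January 2006 to June 2010 is roughly 4.5 years; the exact confidence-interval method is not reproduced here).

```python
# Illustrative arithmetic (hypothetical counts): average annual out-of-hospital
# cardiac arrests per site for a location category.
def annual_incidence_per_site(arrest_count, site_count, study_years):
    return arrest_count / (site_count * study_years)

# e.g., a category with 9 arrests across 40 sites over ~4.5 years of data
print(round(annual_incidence_per_site(9, 40, 4.5), 3))  # 0.05 arrests/site/year
```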
8. | Motion-compensating intensity maps in intensity-modulated radiation therapy Journal Article T. C. Y. Chan IIE Transactions on Healthcare Systems Engineering, Vol. 3, pp. 1-22, 2013. @article{ChanTCY.J008, title = {Motion-compensating intensity maps in intensity-modulated radiation therapy}, author = {T. C. Y. Chan}, doi = {10.1080/19488300.2012.749436}, year = {2013}, date = {2013-01-01}, journal = {IIE Transactions on Healthcare Systems Engineering}, volume = {3}, pages = {1-22}, abstract = {Managing the effects of tumor motion during radiation therapy is critical to ensuring that a robust treatment is delivered to a cancer patient. Tumor motion due to patient breathing may result in the tumor moving in and out of the beam of radiation, causing the edge of the tumor to be underdosed. One approach to managing the effects of motion is to increase the intensity of the radiation delivered at the edge of the tumor—an edge-enhanced intensity map—which decreases the likelihood of underdosing that area. A second approach is to use a margin, which increases the volume of irradiation surrounding the tumor, also with the aim of reducing the risk of underdosage. In this paper, we characterize the structure of optimal solutions within these two classes of intensity maps. We prove that the ratio of the tumor size to the standard deviation of motion characterizes the structure of an optimal edge-enhanced intensity map. Similar results are derived for a three-dimensional margin case. Furthermore, we extend our analysis by considering a robust version of the problem where the parameters of the underlying motion distribution are not known with certainty, but lie in pre-specified intervals. We show that the robust counterpart of the uncertain 3D margin problem has a very similar structure to the nominal (no uncertainty) problem.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Managing the effects of tumor motion during radiation therapy is critical to ensuring that a robust treatment is delivered to a cancer patient. Tumor motion due to patient breathing may result in the tumor moving in and out of the beam of radiation, causing the edge of the tumor to be underdosed. One approach to managing the effects of motion is to increase the intensity of the radiation delivered at the edge of the tumor—an edge-enhanced intensity map—which decreases the likelihood of underdosing that area. A second approach is to use a margin, which increases the volume of irradiation surrounding the tumor, also with the aim of reducing the risk of underdosage. In this paper, we characterize the structure of optimal solutions within these two classes of intensity maps. We prove that the ratio of the tumor size to the standard deviation of motion characterizes the structure of an optimal edge-enhanced intensity map. Similar results are derived for a three-dimensional margin case. Furthermore, we extend our analysis by considering a robust version of the problem where the parameters of the underlying motion distribution are not known with certainty, but lie in pre-specified intervals. We show that the robust counterpart of the uncertain 3D margin problem has a very similar structure to the nominal (no uncertainty) problem. |
2012 |
|
7. | Quantifying the contribution of NHL player types to team performance Journal Article T. C. Y. Chan, J. A. Cho, D. C. Novati Interfaces, Vol. 42, pp. 131-145, 2012. @article{ChanTCY.J007, title = {Quantifying the contribution of NHL player types to team performance}, author = {T. C. Y. Chan and J. A. Cho and D. C. Novati}, doi = {10.1287/inte.1110.0612}, year = {2012}, date = {2012-03-01}, journal = {Interfaces}, volume = {42}, pages = {131-145}, abstract = {In this paper, we use k-means clustering to define distinct player types for each of the three positions on a National Hockey League (NHL) team and then use regression to determine a quantitative relationship between team performance and the player types identified in the clustering. Using NHL regular-season data from 2005–2010, we identify four forward types, four defensemen types, and three goalie types. Goalies tend to contribute the most to team performance, followed by forwards and then defensemen. We also show that once we account for salary cap and playing-time information, the value of different player types may become similar. Lastly, we illustrate how to use the regression results to analyze trades and their impact on team performance.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this paper, we use k-means clustering to define distinct player types for each of the three positions on a National Hockey League (NHL) team and then use regression to determine a quantitative relationship between team performance and the player types identified in the clustering. Using NHL regular-season data from 2005–2010, we identify four forward types, four defensemen types, and three goalie types. Goalies tend to contribute the most to team performance, followed by forwards and then defensemen. We also show that once we account for salary cap and playing-time information, the value of different player types may become similar. Lastly, we illustrate how to use the regression results to analyze trades and their impact on team performance. |
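The two-step approach in this paper, clustering players into types and then regressing team performance on those types, can be illustrated with a short scikit-learn sketch; the four-cluster choice for forwards follows the abstract, while the feature matrices and team assignments below are hypothetical.

```python
# Illustrative sketch of the cluster-then-regress idea (not the paper's code or data).
import numpy as np
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(1)
forward_stats = rng.normal(size=(300, 6))       # hypothetical per-player season stats
team_of_player = rng.integers(0, 30, size=300)  # hypothetical team assignments
team_points = rng.normal(90, 10, size=30)       # hypothetical team performance

# Step 1: define player types by clustering (four forward types, as in the paper).
types = KMeans(n_clusters=4, n_init=10, random_state=0).fit_predict(forward_stats)

# Step 2: count each type on each team and regress team performance on the counts.
X = np.zeros((30, 4))
for team, t in zip(team_of_player, types):
    X[team, t] += 1
model = LinearRegression().fit(X, team_points)
print(model.coef_)  # estimated contribution of each player type
```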
2010 |
|
6. | Optimal margin and edge-enhanced intensity maps in the presence of motion and uncertainty Journal Article T. C. Y. Chan, J. N. Tsitsiklis, T. Bortfeld Physics in Medicine and Biology, Vol. 55, pp. 515-533, 2010. @article{ChanTCY.J006, title = {Optimal margin and edge-enhanced intensity maps in the presence of motion and uncertainty}, author = {T. C. Y. Chan and J. N. Tsitsiklis and T. Bortfeld}, doi = {10.1088/0031-9155/55/2/012}, year = {2010}, date = {2010-01-01}, journal = {Physics in Medicine and Biology}, volume = {55}, pages = {515-533}, abstract = {In radiation therapy, intensity maps involving margins have long been used to counteract the effects of dose blurring arising from motion. More recently, intensity maps with increased intensity near the edge of the tumour (edge enhancements) have been studied to evaluate their ability to offset similar effects that affect tumour coverage. In this paper, we present a mathematical methodology to derive margin and edge-enhanced intensity maps that aim to provide tumour coverage while delivering minimum total dose. We show that if the tumour is at most about twice as large as the standard deviation of the blurring distribution, the optimal intensity map is a pure scaling increase of the static intensity map without any margins or edge enhancements. Otherwise, if the tumour size is roughly twice (or more) the standard deviation of motion, then margins and edge enhancements are preferred, and we present formulae to calculate the exact dimensions of these intensity maps. Furthermore, we extend our analysis to include scenarios where the parameters of the motion distribution are not known with certainty, but rather can take any value in some range. In these cases, we derive a similar threshold to determine the structure of an optimal margin intensity map.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In radiation therapy, intensity maps involving margins have long been used to counteract the effects of dose blurring arising from motion. More recently, intensity maps with increased intensity near the edge of the tumour (edge enhancements) have been studied to evaluate their ability to offset similar effects that affect tumour coverage. In this paper, we present a mathematical methodology to derive margin and edge-enhanced intensity maps that aim to provide tumour coverage while delivering minimum total dose. We show that if the tumour is at most about twice as large as the standard deviation of the blurring distribution, the optimal intensity map is a pure scaling increase of the static intensity map without any margins or edge enhancements. Otherwise, if the tumour size is roughly twice (or more) the standard deviation of motion, then margins and edge enhancements are preferred, and we present formulae to calculate the exact dimensions of these intensity maps. Furthermore, we extend our analysis to include scenarios where the parameters of the motion distribution are not known with certainty, but rather can take any value in some range. In these cases, we derive a similar threshold to determine the structure of an optimal margin intensity map. |
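The dose-blurring mechanism behind this analysis, a static intensity profile convolved with the motion distribution, can be illustrated in one dimension. The sketch below uses a Gaussian motion distribution and hypothetical dimensions; it is not the paper's model, only a picture of why margins help when the target is large relative to the motion.

```python
# Illustrative 1D sketch of dose blurring from motion (not the paper's model):
# convolve a static intensity profile with a Gaussian motion distribution and
# compare the blurred dose inside the target with and without a margin.
import numpy as np

x = np.arange(-60, 61)                      # positions in mm (hypothetical grid)
sigma = 10.0                                # std. dev. of motion, mm (assumed)
pdf = np.exp(-x**2 / (2 * sigma**2))
pdf /= pdf.sum()                            # discrete motion probability mass

def blurred_min_target_dose(half_width_mm, margin_mm):
    """Minimum blurred dose inside a target of given half-width, with a margin."""
    intensity = (np.abs(x) <= half_width_mm + margin_mm).astype(float)
    blurred = np.convolve(intensity, pdf, mode="same")
    return blurred[np.abs(x) <= half_width_mm].min()

print(blurred_min_target_dose(20, 0))    # no margin: the target edge is underdosed
print(blurred_min_target_dose(20, 15))   # with a margin: edge coverage improves
```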
2009 |
|
5. | Experimental evaluation of a robust optimization method for IMRT of moving targets Journal Article C. Vrančić, A. Trofimov, T. C. Y. Chan, G. C. Sharp, T. Bortfeld Physics in Medicine and Biology, Vol. 54, pp. 2901-2914, 2009. @article{ChanTCY.J005, title = {Experimental evaluation of a robust optimization method for IMRT of moving targets}, author = {C. Vrančić and A. Trofimov and T. C. Y. Chan and G. C. Sharp and T. Bortfeld}, doi = {10.1088/0031-9155/54/9/021}, year = {2009}, date = {2009-03-25}, journal = {Physics in Medicine and Biology}, volume = {54}, pages = {2901-2914}, abstract = {Internal organ motion during radiation therapy, if not considered appropriately in the planning process, has been shown to reduce target coverage and increase the dose to healthy tissues. Standard planning approaches, which use safety margins to handle intrafractional movement of the tumor, are typically designed based on the maximum amplitude of motion, and are often overly conservative. Comparable coverage and reduced dose to healthy organs appear achievable with robust motion-adaptive treatment planning, which considers the expected probability distribution of the average target position and the uncertainty of its realization during treatment delivery. A dosimetric test of a robust optimization method for IMRT was performed, using patient breathing data. External marker motion data acquired from respiratory-gated radiotherapy patients were used to build and test the framework for robust optimization. The motion trajectories recorded during radiation treatment itself are not strictly necessary to generate the initial version of a robust treatment plan, but can be used to adapt the plan during the course of treatment. Single-field IMRT plans were optimized to deliver a uniform dose to a rectangular area. During delivery on a linear accelerator, a computer-driven motion phantom reproduced the patients' breathing patterns and a two-dimensional ionization detector array measured the dose delivered. The dose distributions from robust-optimized plans were compared to those from standard plans, which used a margin expansion. Dosimetric tests confirmed the improved sparing of the non-target area with robust planning, which was achieved without compromising the target coverage. The maximum dose in robust plans did not exceed 110% of the prescription, while the minimum target doses were comparable in standard and robust plans. In test courses, optimized for a simplified target geometry, and delivered to a phantom that moved in one dimension with an average amplitude of 17 mm, the robust treatment design produced a reduction of more than 12% of the integral dose to non-target areas, compared to the standard plan using 10 mm margin expansion.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Internal organ motion during radiation therapy, if not considered appropriately in the planning process, has been shown to reduce target coverage and increase the dose to healthy tissues. Standard planning approaches, which use safety margins to handle intrafractional movement of the tumor, are typically designed based on the maximum amplitude of motion, and are often overly conservative. Comparable coverage and reduced dose to healthy organs appear achievable with robust motion-adaptive treatment planning, which considers the expected probability distribution of the average target position and the uncertainty of its realization during treatment delivery. 
A dosimetric test of a robust optimization method for IMRT was performed, using patient breathing data. External marker motion data acquired from respiratory-gated radiotherapy patients were used to build and test the framework for robust optimization. The motion trajectories recorded during radiation treatment itself are not strictly necessary to generate the initial version of a robust treatment plan, but can be used to adapt the plan during the course of treatment. Single-field IMRT plans were optimized to deliver a uniform dose to a rectangular area. During delivery on a linear accelerator, a computer-driven motion phantom reproduced the patients' breathing patterns and a two-dimensional ionization detector array measured the dose delivered. The dose distributions from robust-optimized plans were compared to those from standard plans, which used a margin expansion. Dosimetric tests confirmed the improved sparing of the non-target area with robust planning, which was achieved without compromising the target coverage. The maximum dose in robust plans did not exceed 110% of the prescription, while the minimum target doses were comparable in standard and robust plans. In test courses, optimized for a simplified target geometry, and delivered to a phantom that moved in one dimension with an average amplitude of 17 mm, the robust treatment design produced a reduction of more than 12% of the integral dose to non-target areas, compared to the standard plan using 10 mm margin expansion. |
2008 |
|
4. | Robust management of motion uncertainty in intensity modulated radiation therapy Journal Article T. Bortfeld, T. C. Y. Chan, A. Trofimov, J. N. Tsitsiklis Operations Research, Vol. 56, pp. 1461-1473, 2008. @article{ChanTCY.J004, title = {Robust management of motion uncertainty in intensity modulated radiation therapy}, author = {T. Bortfeld and T. C. Y. Chan and A. Trofimov and J. N. Tsitsiklis}, doi = {10.1287/opre.1070.0484}, year = {2008}, date = {2008-04-01}, journal = {Operations Research}, volume = {56}, pages = {1461-1473}, abstract = {Radiation therapy is subject to uncertainties that need to be accounted for when determining a suitable treatment plan for a cancer patient. For lung and liver tumors, the presence of breathing motion during treatment is a challenge to the effective and reliable delivery of the radiation. In this paper, we build a model of motion uncertainty using probability density functions that describe breathing motion, and provide a robust formulation of the problem of optimizing intensity-modulated radiation therapy. We populate our model with real patient data and measure the robustness of the resulting solutions on a clinical lung example. Our robust framework generalizes current mathematical programming formulations that account for motion, and gives insight into the trade-off between sparing the healthy tissues and ensuring that the tumor receives sufficient dose. For comparison, we also compute solutions to a nominal (no uncertainty) and margin (worst-case) formulation. In our experiments, we found that the nominal solution typically underdosed the tumor in the unacceptable range of 6% to 11%, whereas the robust solution underdosed by only 1% to 2% in the worst case. In addition, the robust solution reduced the total dose delivered to the main organ-at-risk (the left lung) by roughly 11% on average, as compared to the margin solution.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Radiation therapy is subject to uncertainties that need to be accounted for when determining a suitable treatment plan for a cancer patient. For lung and liver tumors, the presence of breathing motion during treatment is a challenge to the effective and reliable delivery of the radiation. In this paper, we build a model of motion uncertainty using probability density functions that describe breathing motion, and provide a robust formulation of the problem of optimizing intensity-modulated radiation therapy. We populate our model with real patient data and measure the robustness of the resulting solutions on a clinical lung example. Our robust framework generalizes current mathematical programming formulations that account for motion, and gives insight into the trade-off between sparing the healthy tissues and ensuring that the tumor receives sufficient dose. For comparison, we also compute solutions to a nominal (no uncertainty) and margin (worst-case) formulation. In our experiments, we found that the nominal solution typically underdosed the tumor in the unacceptable range of 6% to 11%, whereas the robust solution underdosed by only 1% to 2% in the worst case. In addition, the robust solution reduced the total dose delivered to the main organ-at-risk (the left lung) by roughly 11% on average, as compared to the margin solution. |
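When the uncertainty set of breathing probability mass functions is described by finitely many extreme points, the robust tumor-coverage requirement becomes one constraint per extreme point. The toy PuLP sketch below illustrates that structure only; the doses, phases, and pdfs are hypothetical and the clinical model in the paper is far larger.

```python
# Toy robust formulation sketch (not the paper's clinical model): choose beamlet
# intensities that meet the tumor dose for every breathing pdf in a finite
# uncertainty set, while minimizing total intensity.
import pulp

dose_per_phase = [[1.0, 0.2], [0.3, 1.1]]   # dose per unit intensity, beamlet x phase (hypothetical)
pdfs = [[0.7, 0.3], [0.4, 0.6]]             # extreme points of the breathing-pdf set (hypothetical)
prescription = 1.0

prob = pulp.LpProblem("robust_imrt_toy", pulp.LpMinimize)
w = pulp.LpVariable.dicts("intensity", range(2), lowBound=0)
prob += pulp.lpSum(w.values())              # surrogate for dose to healthy tissue
for p in pdfs:                              # one coverage constraint per extreme-point pdf
    prob += pulp.lpSum(p[k] * dose_per_phase[b][k] * w[b]
                       for b in range(2) for k in range(2)) >= prescription
prob.solve(pulp.PULP_CBC_CMD(msg=False))
print([w[b].value() for b in range(2)])
```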
3. | Tumor trailing strategy for intensity-modulated radiation therapy of moving targets Journal Article A. Trofimov, C. Vrancic, T. C. Y. Chan, G. C. Sharp, T. Bortfeld Medical Physics, Vol. 35, pp. 1718-1733, 2008. @article{ChanTCY.J003, title = {Tumor trailing strategy for intensity-modulated radiation therapy of moving targets}, author = {A. Trofimov and C. Vrancic and T. C. Y. Chan and G. C. Sharp and T. Bortfeld}, doi = {10.1118/1.2900108}, year = {2008}, date = {2008-02-22}, journal = {Medical Physics}, volume = {35}, pages = {1718-1733}, abstract = {Internal organ motion during the course of radiation therapy of cancer affects the distribution of the delivered dose and, generally, reduces its conformality to the targeted volume. Previously proposed approaches aimed at mitigating the effect of internal motion in intensity-modulated radiation therapy (IMRT) included expansion of the target margins, motion-correlated delivery (e.g., respiratory gating, tumor tracking), and adaptive treatment plan optimization employing a probabilistic description of motion. We describe and test the tumor trailing strategy, which utilizes the synergy of motion-adaptive treatment planning and delivery methods. We regard the (rigid) target motion as a superposition of a relatively fast cyclic component (e.g., respiratory) and slow aperiodic trends (e.g., the drift of exhalation baseline). In the trailing approach, these two components of motion are decoupled and dealt with separately. Real-time motion monitoring is employed to identify the 'slow' shifts, which are then corrected by applying setup adjustments. The delivery does not track the target position exactly, but trails the systematic trend due to the delay between the time a shift occurs, is reliably detected, and, subsequently, corrected. The 'fast' cyclic motion is accounted for with a robust motion-adaptive treatment planning, which allows for variability in motion parameters (e.g., mean and extrema of the tidal volume, variable period of respiration, and expiratory duration). Motion-surrogate data from gated IMRT treatments were used to provide probability distribution data for motion-adaptive planning and to test algorithms that identified systematic trends in the character of motion. Sample IMRT fields were delivered on a clinical linear accelerator to a programmable moving phantom. Dose measurements were performed with a commercial two-dimensional ion-chamber array. The results indicate that by reducing intrafractional motion variability, the trailing strategy enhances relevance and applicability of motion-adaptive planning methods, and improves conformality of the delivered dose to the target in the presence of irregular motion. Trailing strategy can be applied to respiratory-gated treatments, in which the correction for the slow motion can increase the duty cycle, while robust probabilistic planning can improve management of the residual motion within the gate window. Similarly, trailing may improve the dose conformality in treatment of patients who exhibit detectable target motion of low amplitude, which is considered insufficient to provide a clinical indication for the use of respiratory-gated treatment (e.g., peak-to-peak motion of less than 10 mm). 
The mechanical limitations of implementing tumor trailing are less rigorous than those of real-time tracking, and the same technology could be used for both.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Internal organ motion during the course of radiation therapy of cancer affects the distribution of the delivered dose and, generally, reduces its conformality to the targeted volume. Previously proposed approaches aimed at mitigating the effect of internal motion in intensity-modulated radiation therapy (IMRT) included expansion of the target margins, motion-correlated delivery (e.g., respiratory gating, tumor tracking), and adaptive treatment plan optimization employing a probabilistic description of motion. We describe and test the tumor trailing strategy, which utilizes the synergy of motion-adaptive treatment planning and delivery methods. We regard the (rigid) target motion as a superposition of a relatively fast cyclic component (e.g., respiratory) and slow aperiodic trends (e.g., the drift of exhalation baseline). In the trailing approach, these two components of motion are decoupled and dealt with separately. Real-time motion monitoring is employed to identify the 'slow' shifts, which are then corrected by applying setup adjustments. The delivery does not track the target position exactly, but trails the systematic trend due to the delay between the time a shift occurs, is reliably detected, and, subsequently, corrected. The 'fast' cyclic motion is accounted for with a robust motion-adaptive treatment planning, which allows for variability in motion parameters (e.g., mean and extrema of the tidal volume, variable period of respiration, and expiratory duration). Motion-surrogate data from gated IMRT treatments were used to provide probability distribution data for motion-adaptive planning and to test algorithms that identified systematic trends in the character of motion. Sample IMRT fields were delivered on a clinical linear accelerator to a programmable moving phantom. Dose measurements were performed with a commercial two-dimensional ion-chamber array. The results indicate that by reducing intrafractional motion variability, the trailing strategy enhances relevance and applicability of motion-adaptive planning methods, and improves conformality of the delivered dose to the target in the presence of irregular motion. Trailing strategy can be applied to respiratory-gated treatments, in which the correction for the slow motion can increase the duty cycle, while robust probabilistic planning can improve management of the residual motion within the gate window. Similarly, trailing may improve the dose conformality in treatment of patients who exhibit detectable target motion of low amplitude, which is considered insufficient to provide a clinical indication for the use of respiratory-gated treatment (e.g., peak-to-peak motion of less than 10 mm). The mechanical limitations of implementing tumor trailing are less rigorous than those of real-time tracking, and the same technology could be used for both. |
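One simple way to separate the slow baseline drift from the fast respiratory cycle, in the spirit of the trailing idea, is a moving-average filter over a few breathing periods; this is an assumed illustration, not the authors' detection algorithm.

```python
# Illustrative drift extraction (assumed approach, not the paper's algorithm):
# a moving average over a few breathing periods estimates the slow baseline,
# which a trailing correction could then follow with some delay.
import numpy as np

t = np.arange(0, 120, 0.1)                   # seconds (hypothetical motion trace)
fast = 5.0 * np.sin(2 * np.pi * t / 4.0)     # ~4 s respiratory cycle, mm
drift = 0.05 * t                             # slow exhalation-baseline drift, mm
trace = fast + drift

window = int(3 * 4.0 / 0.1)                  # average over ~3 breathing cycles
baseline = np.convolve(trace, np.ones(window) / window, mode="same")
print(round(baseline[-window], 2))  # estimated slow component to correct via a setup shift
```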
2007 |
|
2. | Accounting for range uncertainties in the optimization of intensity modulated proton therapy Journal Article J. Unkelbach, T. C. Y. Chan, T. Bortfeld Physics in Medicine and Biology, Vol. 52, pp. 2755-2773, 2007. @article{ChanTCY.J002, title = {Accounting for range uncertainties in the optimization of intensity modulated proton therapy}, author = {J. Unkelbach and T. C. Y. Chan and T. Bortfeld}, doi = {10.1088/0031-9155/52/10/009}, year = {2007}, date = {2007-03-08}, journal = {Physics in Medicine and Biology}, volume = {52}, pages = {2755-2773}, abstract = {Treatment plans optimized for intensity modulated proton therapy (IMPT) may be sensitive to range variations. The dose distribution may deteriorate substantially when the actual range of a pencil beam does not match the assumed range. We present two treatment planning concepts for IMPT which incorporate range uncertainties into the optimization. The first method is a probabilistic approach. The range of a pencil beam is assumed to be a random variable, which makes the delivered dose and the value of the objective function a random variable too. We then propose to optimize the expectation value of the objective function. The second approach is a robust formulation that applies methods developed in the field of robust linear programming. This approach optimizes the worst case dose distribution that may occur, assuming that the ranges of the pencil beams may vary within some interval. Both methods yield treatment plans that are considerably less sensitive to range variations compared to conventional treatment plans optimized without accounting for range uncertainties. In addition, both approaches—although conceptually different—yield very similar results on a qualitative level.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Treatment plans optimized for intensity modulated proton therapy (IMPT) may be sensitive to range variations. The dose distribution may deteriorate substantially when the actual range of a pencil beam does not match the assumed range. We present two treatment planning concepts for IMPT which incorporate range uncertainties into the optimization. The first method is a probabilistic approach. The range of a pencil beam is assumed to be a random variable, which makes the delivered dose and the value of the objective function a random variable too. We then propose to optimize the expectation value of the objective function. The second approach is a robust formulation that applies methods developed in the field of robust linear programming. This approach optimizes the worst case dose distribution that may occur, assuming that the ranges of the pencil beams may vary within some interval. Both methods yield treatment plans that are considerably less sensitive to range variations compared to conventional treatment plans optimized without accounting for range uncertainties. In addition, both approaches—although conceptually different—yield very similar results on a qualitative level. |
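The contrast between the two planning concepts can be seen on a toy scenario set: the probabilistic approach averages the objective over sampled range errors, while the robust approach minimizes the worst case over the interval. All numbers below are hypothetical stand-ins, not the paper's dose model.

```python
# Toy contrast between probabilistic and robust handling of range error
# (hypothetical numbers, not the paper's dose model or data).
import numpy as np

def objective(intensity, range_shift_mm):
    """Stand-in objective: penalty grows when the shifted dose misses the target."""
    return (intensity * (1 - 0.02 * abs(range_shift_mm)) - 1.0) ** 2

shifts = np.array([-5.0, -2.0, 0.0, 2.0, 5.0])   # candidate range errors, mm
probs = np.array([0.1, 0.2, 0.4, 0.2, 0.1])      # assumed scenario probabilities

candidates = np.linspace(0.9, 1.3, 41)
expected = [np.dot(probs, [objective(w, s) for s in shifts]) for w in candidates]
worst = [max(objective(w, s) for s in shifts) for w in candidates]
print("probabilistic choice:", round(candidates[int(np.argmin(expected))], 3))
print("robust choice:", round(candidates[int(np.argmin(worst))], 3))
```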
2006 |
|
1. | A robust approach to IMRT optimization Journal Article T. C. Y. Chan, T. Bortfeld, J. N. Tsitsiklis Physics in Medicine and Biology, Vol. 51, pp. 2567-2583, 2006. @article{ChanTCY.J001, title = {A robust approach to IMRT optimization}, author = {T. C. Y. Chan and T. Bortfeld and J. N. Tsitsiklis}, doi = {10.1088/0031-9155/51/10/014}, year = {2006}, date = {2006-01-26}, journal = {Physics in Medicine and Biology}, volume = {51}, pages = {2567-2583}, abstract = {Managing uncertainty is a major challenge in radiation therapy treatment planning, including uncertainty induced by intrafraction motion, which is particularly important for tumours in the thorax and abdomen. Common methods to account for motion are to introduce a margin or to convolve the static dose distribution with a motion probability density function. Unlike previous work in this area, our development does not assume that the patient breathes according to a fixed distribution, nor is the patient required to breathe the same way throughout the treatment. Despite this generality, we create a robust optimization framework starting from the convolution method that is robust to fluctuations in breathing motion, yet spares healthy tissue better than a margin solution. We describe how to generate the data for our model using breathing motion data and we test our model on a computer phantom using data from real patients. In our numerical results, the robust solution delivers approximately 38% less dose to the healthy tissue than the margin solution, while providing the same level of protection against breathing uncertainty.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Managing uncertainty is a major challenge in radiation therapy treatment planning, including uncertainty induced by intrafraction motion, which is particularly important for tumours in the thorax and abdomen. Common methods to account for motion are to introduce a margin or to convolve the static dose distribution with a motion probability density function. Unlike previous work in this area, our development does not assume that the patient breathes according to a fixed distribution, nor is the patient required to breathe the same way throughout the treatment. Despite this generality, we create a robust optimization framework starting from the convolution method that is robust to fluctuations in breathing motion, yet spares healthy tissue better than a margin solution. We describe how to generate the data for our model using breathing motion data and we test our model on a computer phantom using data from real patients. In our numerical results, the robust solution delivers approximately 38% less dose to the healthy tissue than the margin solution, while providing the same level of protection against breathing uncertainty. |
8. | Introducing and integrating machine learning in an operations research curriculum: an application-driven course Miscellaneous J. J. Boutilier, T. C. Y. Chan under revision for INFORMS Transactions on Education, 2020. @misc{ChanTCY.Pre003, title = {Introducing and integrating machine learning in an operations research curriculum: an application-driven course}, author = {J. J. Boutilier and T. C. Y. Chan}, year = {2020}, date = {2020-12-03}, journal = {under revision for INFORMS Transactions on Education}, howpublished = {under revision for INFORMS Transactions on Education}, keywords = {}, pubstate = {published}, tppubtype = {misc} } |
7. | Public defibrillator accessibility and mobility trends during the COVID-19 pandemic in Canada Miscellaneous K. H. B. Leung, R. Alam, S. C. Brooks, T. C. Y. Chan under review at Resuscitation, 2020. @misc{ChanTCY.Pre002, title = {Public defibrillator accessibility and mobility trends during the COVID-19 pandemic in Canada}, author = {K. H. B. Leung and R. Alam and S. C. Brooks and T. C. Y. Chan}, year = {2020}, date = {2020-12-02}, howpublished = {under review at Resuscitation}, keywords = {}, pubstate = {published}, tppubtype = {misc} } |
6. | Inverse mixed integer optimization: Certificate sets and trust region methods Miscellaneous M. Bodur, T. C. Y. Chan, I. Zhu under review at Operations Research, 2020. @misc{ChanTCY.Pre001, title = {Inverse mixed integer optimization: Certificate sets and trust region methods}, author = {M. Bodur and T. C. Y. Chan and I. Zhu}, url = {https://arxiv.org/abs/2008.00301}, year = {2020}, date = {2020-08-01}, journal = {under review at Operations Research}, abstract = {Inverse optimization, determining parameters of an optimization problem that render a given solution optimal, has received increasing attention in recent years. While significant inverse optimization literature exists for convex optimization problems, there have been few advances for discrete problems, despite the ubiquity of applications that fundamentally rely on discrete decision-making. In this paper, we present a new set of theoretical insights and algorithms for the general class of inverse mixed integer linear optimization problems. Our theoretical results establish a new characterization of optimality conditions, defined as certificate sets, which are leveraged to design new types of cutting plane algorithms using trust regions. Through an extensive set of computational experiments, we show that our methods provide substantial improvements over existing methods in solving the largest and most difficult instances to date.}, howpublished = {under review at Operations Research}, keywords = {}, pubstate = {published}, tppubtype = {misc} } Inverse optimization, determining parameters of an optimization problem that render a given solution optimal, has received increasing attention in recent years. While significant inverse optimization literature exists for convex optimization problems, there have been few advances for discrete problems, despite the ubiquity of applications that fundamentally rely on discrete decision-making. In this paper, we present a new set of theoretical insights and algorithms for the general class of inverse mixed integer linear optimization problems. Our theoretical results establish a new characterization of optimality conditions, defined as certificate sets, which are leveraged to design new types of cutting plane algorithms using trust regions. Through an extensive set of computational experiments, we show that our methods provide substantial improvements over existing methods in solving the largest and most difficult instances to date. |
5. | Cherry-picking and its negative effect on system service level: evidence from a radiology workflow platform Miscellaneous T. C. Y. Chan, N. Howard, S. Lagzi, G. Romero 2020. @misc{ChanTCY.Pre007, title = {Cherry-picking and its negative effect on system service level: evidence from a radiology workflow platform}, author = {T. C. Y. Chan and N. Howard and S. Lagzi and G. Romero}, url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3273494}, year = {2020}, date = {2020-06-18}, abstract = {Piece-rate compensation schemes, where workers are paid for each completed task regardless of the time spent on it, are common in practice. However, imbalances between pay and workload of different tasks could result in cherry-picking of tasks deemed to have high pay relative to workload. Using a large dataset from a radiology workflow platform that connects off-site radiologists with hospitals, we empirically investigate whether radiologists cherry-pick tasks with high pay-to-workload, and if cherry-picking has a negative impact on system service level. In the platform we study, radiologists have discretion to select tasks from a common pool, and the service level is characterized by meeting priority-specific turnaround time targets. We show that turnaround time is monotonically decreasing in pay-to-workload. More importantly, we also show a spillover effect. Namely, that cherry-picking of low priority tasks can lead to longer turnaround times for higher priority tasks, resulting in delays. Our results suggest that organizations where workers have task discretion from a common pool need to carefully align their piece-rate compensation scheme with the workload of each task. Imbalances may lead to a degradation in the system service level provided to time-sensitive customers.}, keywords = {}, pubstate = {published}, tppubtype = {misc} } Piece-rate compensation schemes, where workers are paid for each completed task regardless of the time spent on it, are common in practice. However, imbalances between pay and workload of different tasks could result in cherry-picking of tasks deemed to have high pay relative to workload. Using a large dataset from a radiology workflow platform that connects off-site radiologists with hospitals, we empirically investigate whether radiologists cherry-pick tasks with high pay-to-workload, and if cherry-picking has a negative impact on system service level. In the platform we study, radiologists have discretion to select tasks from a common pool, and the service level is characterized by meeting priority-specific turnaround time targets. We show that turnaround time is monotonically decreasing in pay-to-workload. More importantly, we also show a spillover effect. Namely, that cherry-picking of low priority tasks can lead to longer turnaround times for higher priority tasks, resulting in delays. Our results suggest that organizations where workers have task discretion from a common pool need to carefully align their piece-rate compensation scheme with the workload of each task. Imbalances may lead to a degradation in the system service level provided to time-sensitive customers. |
4. | An inverse optimization approach to measuring clinical pathway concordance Miscellaneous T. C. Y. Chan, M. Eberg, K. Forster, C. Holloway, L. Ieraci, Y. Shalaby, N. Yousefi under second review at Management Science, 2020. @misc{ChanTCY.Pre008, title = {An inverse optimization approach to measuring clinical pathway concordance}, author = {T. C. Y. Chan and M. Eberg and K. Forster and C. Holloway and L. Ieraci and Y. Shalaby and N. Yousefi}, url = {https://arxiv.org/abs/1906.02636}, year = {2020}, date = {2020-06-01}, abstract = {Clinical pathways outline standardized processes in the delivery of care for a specific disease. Patient journeys through the healthcare system, though, can deviate substantially from these pathways. Given the positive benefits of clinical pathways, it is important to measure the concordance of patient pathways so that variations in health system performance or bottlenecks in the delivery of care can be detected, monitored, and acted upon. This paper proposes the first data-driven inverse optimization approach to measuring pathway concordance in any problem context. Our specific application considers clinical pathway concordance for stage III colon cancer. We develop a novel concordance metric and demonstrate using real patient data from Ontario, Canada that it has a statistically significant association with survival. Our methodological approach considers a patient's journey as a walk in a directed graph, where the costs on the arcs are derived by solving an inverse shortest path problem. The inverse optimization model uses two sources of information to find the arc costs: reference pathways developed by a provincial cancer agency (primary) and data from real-world patient-related activity from patients with both positive and negative clinical outcomes (secondary). Thus, our inverse optimization framework extends existing models by including data points of both varying "primacy" and "alignment". Data primacy is addressed through a two-stage approach to imputing the cost vector, while data alignment is addressed by a hybrid objective function that aims to minimize and maximize suboptimality error for different subsets of input data.}, howpublished = {under second review at Management Science}, keywords = {}, pubstate = {published}, tppubtype = {misc} } Clinical pathways outline standardized processes in the delivery of care for a specific disease. Patient journeys through the healthcare system, though, can deviate substantially from these pathways. Given the positive benefits of clinical pathways, it is important to measure the concordance of patient pathways so that variations in health system performance or bottlenecks in the delivery of care can be detected, monitored, and acted upon. This paper proposes the first data-driven inverse optimization approach to measuring pathway concordance in any problem context. Our specific application considers clinical pathway concordance for stage III colon cancer. We develop a novel concordance metric and demonstrate using real patient data from Ontario, Canada that it has a statistically significant association with survival. Our methodological approach considers a patient's journey as a walk in a directed graph, where the costs on the arcs are derived by solving an inverse shortest path problem. 
The inverse optimization model uses two sources of information to find the arc costs: reference pathways developed by a provincial cancer agency (primary) and data from real-world patient-related activity from patients with both positive and negative clinical outcomes (secondary). Thus, our inverse optimization framework extends existing models by including data points of both varying "primacy" and "alignment". Data primacy is addressed through a two-stage approach to imputing the cost vector, while data alignment is addressed by a hybrid objective function that aims to minimize and maximize suboptimality error for different subsets of input data. |
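Once arc costs have been imputed, one natural concordance-style measure compares the cost of the observed patient walk with the shortest-path cost between the same endpoints. The networkx sketch below is only an illustration of that idea on a hypothetical graph; it is not the paper's metric or its inverse-optimization-derived costs.

```python
# Illustrative concordance-style calculation (hypothetical graph and costs; the
# paper's actual metric and imputed arc costs are not reproduced here).
import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([
    ("diagnosis", "surgery", 1.0),
    ("surgery", "chemo", 1.0),
    ("diagnosis", "imaging", 0.5),
    ("imaging", "surgery", 1.0),
    ("chemo", "follow_up", 1.0),
])

def walk_cost(walk):
    """Total cost of an observed walk through the pathway graph."""
    return sum(G[u][v]["weight"] for u, v in zip(walk, walk[1:]))

observed = ["diagnosis", "imaging", "surgery", "chemo", "follow_up"]
best = nx.shortest_path_length(G, observed[0], observed[-1], weight="weight")
print(round(best / walk_cost(observed), 3))  # 1.0 would indicate a fully concordant journey
```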
3. | Spatial price integration in competitive markets with capacitated transportation networks Miscellaneous J. R. Birge, T. C. Y. Chan, M. Pavlin, I. Zhu under revision for Operations Research, 2020. @misc{ChanTCY.Pre005, title = {Spatial price integration in competitive markets with capacitated transportation networks}, author = {J. R. Birge and T. C. Y. Chan and M. Pavlin and I. Zhu}, url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3544530}, year = {2020}, date = {2020-03-23}, howpublished = {under revision for Operations Research}, keywords = {}, pubstate = {published}, tppubtype = {misc} } |
2. | Response time optimization for drone-delivered automated external defibrillators Miscellaneous J. J. Boutilier, T. C. Y. Chan under revision for Manufacturing and Service Operations Management, 2019. @misc{ChanTCY.Pre006, title = {Response time optimization for drone-delivered automated external defibrillators}, author = {J. J. Boutilier and T. C. Y. Chan}, url = {https://arxiv.org/abs/1908.00149}, year = {2019}, date = {2019-07-31}, abstract = {Out-of-hospital cardiac arrest (OHCA) claims over 400,000 lives each year in North America and is one of the most time-sensitive medical emergencies. Drone-delivered automated external defibrillators (AEDs) have the potential to be a transformative innovation in the provision of emergency care for OHCA. In this paper, we propose a simulation-optimization framework to minimize the total number of drones required to meet a pre-specified response time goal, while guaranteeing a sufficient number of drones are located at each base. To do this, we develop a location-queuing model that is based on the p-median architecture, where each base constitutes an explicit M/M/d queue, and that incorporates estimated baseline response times to the demand points. We then develop a reformulation technique that exploits the baseline response times, allowing us to solve real-world instances to optimality using an off-the-shelf solver. To test our model, we develop a two-stage machine learning approach to simulate both the locations and baseline response times for future OHCAs. We demonstrate the application of our framework using eight years of real data from an area covering 26,000 square kilometres around Toronto, Canada. A modest number of drones are required to significantly reduce response times in all regions. Furthermore, an objective function focused on improving the 90th percentile is well-suited for use in practice because the model reduces the entire response time distribution, while providing equitable coverage in both cities and rural areas. Overall, this paper provides a realistic framework that can be leveraged by healthcare providers seeking to implement a drone network.}, howpublished = {under revision for Manufacturing and Service Operations Management}, keywords = {}, pubstate = {published}, tppubtype = {misc} } Out-of-hospital cardiac arrest (OHCA) claims over 400,000 lives each year in North America and is one of the most time-sensitive medical emergencies. Drone-delivered automated external defibrillators (AEDs) have the potential to be a transformative innovation in the provision of emergency care for OHCA. In this paper, we propose a simulation-optimization framework to minimize the total number of drones required to meet a pre-specified response time goal, while guaranteeing a sufficient number of drones are located at each base. To do this, we develop a location-queuing model that is based on the p-median architecture, where each base constitutes an explicit M/M/d queue, and that incorporates estimated baseline response times to the demand points. We then develop a reformulation technique that exploits the baseline response times, allowing us to solve real-world instances to optimality using an off-the-shelf solver. To test our model, we develop a two-stage machine learning approach to simulate both the locations and baseline response times for future OHCAs. We demonstrate the application of our framework using eight years of real data from an area covering 26,000 square kilometres around Toronto, Canada. 
A modest number of drones are required to significantly reduce response times in all regions. Furthermore, an objective function focused on improving the 90th percentile is well-suited for use in practice because the model reduces the entire response time distribution, while providing equitable coverage in both cities and rural areas. Overall, this paper provides a realistic framework that can be leveraged by healthcare providers seeking to implement a drone network. |
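Each drone base in this model behaves as an M/M/d queue, so the classical Erlang-C formula gives the probability that an arriving request must wait for a drone, which is the kind of quantity such a location-queuing model constrains. The computation below is the standard textbook formula with hypothetical rates, shown only as an illustration.

```python
# Erlang-C probability of waiting for an M/M/d queue (standard formula; the
# arrival and service rates below are hypothetical, not the paper's estimates).
from math import factorial

def erlang_c(arrival_rate, service_rate, servers):
    a = arrival_rate / service_rate                  # offered load (Erlangs)
    rho = a / servers                                # utilization, must be < 1
    num = a**servers / (factorial(servers) * (1 - rho))
    den = sum(a**k / factorial(k) for k in range(servers)) + num
    return num / den

# e.g., 2 OHCA calls per hour, 1-hour mean drone mission time, 4 drones at a base
print(round(erlang_c(2.0, 1.0, 4), 3))  # probability an arriving call must wait
```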
1. | Sparse flexible design: a machine learning approach Miscellaneous T. C. Y. Chan, D. Letourneau, B. Potter under revision for Flexible Services and Manufacturing Journal, 2019. @misc{ChanTCY.Pre009, title = {Sparse flexible design: a machine learning approach}, author = {T. C. Y. Chan and D. Letourneau and B. Potter}, url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3390000}, year = {2019}, date = {2019-06-06}, abstract = {For a general production network, state-of-the-art methods for constructing sparse flexible designs are heuristic in nature, typically computing a proxy for the quality of unseen networks and using that estimate in a greedy manner to modify a current design. This paper develops two machine learning-based approaches to constructing sparse flexible designs that leverage a neural network to accurately and quickly predict the performance of large numbers of candidate designs. We demonstrate that our heuristics are competitive with existing approaches and produce high-quality solutions for both balanced and unbalanced networks. Finally, we introduce a novel application of process flexibility in healthcare operations to demonstrate the effectiveness of our approach in a large numerical case study. We study the flexibility of linear accelerators that deliver radiation to treat various types of cancer. We demonstrate how clinical constraints can be easily absorbed into the machine learning subroutine and how our sparse flexible treatment networks meet or beat the performance of those designed by state-of-the-art methods.}, howpublished = {under revision for Flexible Services and Manufacturing Journal}, keywords = {}, pubstate = {published}, tppubtype = {misc} } For a general production network, state-of-the-art methods for constructing sparse flexible designs are heuristic in nature, typically computing a proxy for the quality of unseen networks and using that estimate in a greedy manner to modify a current design. This paper develops two machine learning-based approaches to constructing sparse flexible designs that leverage a neural network to accurately and quickly predict the performance of large numbers of candidate designs. We demonstrate that our heuristics are competitive with existing approaches and produce high-quality solutions for both balanced and unbalanced networks. Finally, we introduce a novel application of process flexibility in healthcare operations to demonstrate the effectiveness of our approach in a large numerical case study. We study the flexibility of linear accelerators that deliver radiation to treat various types of cancer. We demonstrate how clinical constraints can be easily absorbed into the machine learning subroutine and how our sparse flexible treatment networks meet or beat the performance of those designed by state-of-the-art methods. |
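The core idea above, learning a fast predictor of design performance and then building a sparse design greedily from its predictions, can be sketched with a small scikit-learn regressor; the data, surrogate performance function, and network size below are all hypothetical, not the paper's simulator or architecture.

```python
# Illustrative sketch of "learn a performance predictor, then add arcs greedily"
# (hypothetical data and surrogate; not the paper's simulator or training setup).
import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.default_rng(2)
n_arcs = 16                                              # candidate flexibility arcs (toy)
designs = rng.integers(0, 2, size=(500, n_arcs)).astype(float)
performance = designs @ rng.uniform(0.5, 1.5, n_arcs) + rng.normal(0, 0.1, 500)

predictor = MLPRegressor(hidden_layer_sizes=(32,), max_iter=2000, random_state=0)
predictor.fit(designs, performance)                      # stand-in for simulated throughput

design = np.zeros(n_arcs)
for _ in range(5):                                       # add 5 arcs greedily
    closed = np.flatnonzero(design == 0)
    trials = np.tile(design, (len(closed), 1))
    trials[np.arange(len(closed)), closed] = 1.0         # try opening each remaining arc
    best = closed[np.argmax(predictor.predict(trials))]  # arc with best predicted performance
    design[best] = 1.0
print(np.flatnonzero(design))
```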
7. | The importance of evaluating the complete knowledge-based automated planning pipeline Inproceedings A. Babier, R. Mahmood, A. Diamant, A. McNiven, T. C. Y. Chan Proceedings of the International Conference on the Use of Computers in Radiation Therapy, 2019. @inproceedings{ChanTCY.Oth008, title = {The importance of evaluating the complete knowledge-based automated planning pipeline}, author = {A. Babier and R. Mahmood and A. Diamant and A. McNiven and T. C. Y. Chan}, year = {2019}, date = {2019-01-01}, booktitle = {Proceedings of the International Conference on the Use of Computers in Radiation Therapy}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |

6. | Automated treatment planning in radiation therapy using generative adversarial networks Inproceedings R. Mahmood, A. Babier, A. McNiven, A. Diamant, T. C. Y. Chan Proceedings of the 3rd Machine Learning for Healthcare Conference, pp. 484-499, PMLR, 2018. @inproceedings{ChanTCY.Oth007, title = {Automated treatment planning in radiation therapy using generative adversarial networks}, author = {R. Mahmood and A. Babier and A. McNiven and A. Diamant and T. C. Y. Chan}, url = {http://proceedings.mlr.press/v85/mahmood18a.html}, year = {2018}, date = {2018-01-01}, booktitle = {Proceedings of the 3rd Machine Learning for Healthcare Conference}, volume = {85}, pages = {484-499}, publisher = {PMLR}, abstract = {Knowledge-based planning (KBP) is an automated approach to radiation therapy treatment planning that involves predicting desirable treatment plans before they are then corrected to deliverable ones. We propose a generative adversarial network (GAN) approach for predicting desirable 3D dose distributions that eschews the previous paradigms of site-specific feature engineering and predicting low-dimensional representations of the plan. Experiments on a dataset of oropharyngeal cancer patients show that our approach significantly outperforms previous methods on several clinical satisfaction criteria and similarity metrics.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Knowledge-based planning (KBP) is an automated approach to radiation therapy treatment planning that involves predicting desirable treatment plans before they are then corrected to deliverable ones. We propose a generative adversarial network (GAN) approach for predicting desirable 3D dose distributions that eschews the previous paradigms of site-specific feature engineering and predicting low-dimensional representations of the plan. Experiments on a dataset of oropharyngeal cancer patients show that our approach significantly outperforms previous methods on several clinical satisfaction criteria and similarity metrics. |
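A compressed, hedged sketch of a conditional GAN for image-to-image dose prediction, trained on random toy tensors rather than CT/dose data. The layer sizes, the `toy_batch` generator, and the added L1 term are illustrative choices, not the architecture from the paper.

```python
import torch
import torch.nn as nn

def toy_batch(n=8):
    """Toy stand-ins for (CT, dose) slice pairs; real data would be imaging arrays."""
    ct = torch.rand(n, 1, 64, 64)
    dose = torch.clamp(ct + 0.1 * torch.randn_like(ct), 0, 1)
    return ct, dose

generator = nn.Sequential(
    nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 1, 3, padding=1), nn.Sigmoid(),
)
discriminator = nn.Sequential(  # scores (ct, dose) pairs, i.e. a conditional GAN critic
    nn.Conv2d(2, 16, 3, stride=2, padding=1), nn.LeakyReLU(0.2),
    nn.Flatten(), nn.Linear(16 * 32 * 32, 1),
)
bce = nn.BCEWithLogitsLoss()
opt_g = torch.optim.Adam(generator.parameters(), lr=2e-4)
opt_d = torch.optim.Adam(discriminator.parameters(), lr=2e-4)

for step in range(100):
    ct, real_dose = toy_batch()
    fake_dose = generator(ct)

    # Discriminator step: push real pairs toward 1 and generated pairs toward 0.
    d_loss = bce(discriminator(torch.cat([ct, real_dose], dim=1)), torch.ones(8, 1)) + \
             bce(discriminator(torch.cat([ct, fake_dose.detach()], dim=1)), torch.zeros(8, 1))
    opt_d.zero_grad(); d_loss.backward(); opt_d.step()

    # Generator step: fool the discriminator and stay close to the reference dose.
    g_loss = bce(discriminator(torch.cat([ct, fake_dose], dim=1)), torch.ones(8, 1)) + \
             nn.functional.l1_loss(fake_dose, real_dose)
    opt_g.zero_grad(); g_loss.backward(); opt_g.step()
```

The contrast with earlier KBP methods is visible even in the toy version: the generator maps the full image directly to a full dose distribution, rather than predicting hand-engineered, low-dimensional plan features.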
5. | The value of flexibility in baseball roster construction Inproceedings T. C. Y. Chan, D. S. Fearing Proceedings of the 7th Annual MIT Sloan Sports Analytics Conference, 2013. @inproceedings{ChanTCY.Oth006, title = {The value of flexibility in baseball roster construction}, author = {T. C. Y. Chan and D. S. Fearing}, url = {http://www.sloansportsconference.com/wp-content/uploads/2013/The%20value%20of%20flexibility%20in%20baseball%20roster%20construction.pdf}, year = {2013}, date = {2013-03-01}, booktitle = {Proceedings of the 7th Annual MIT Sloan Sports Analytics Conference}, abstract = {Drawing inspiration from the theory of production flexibility in manufacturing networks, we provide the first optimization-based analysis of the value of positional flexibility (the ability of a player to play multiple positions) for a major league baseball team in the presence of injury risk. First, we develop novel statistical models to estimate (1) the likelihood and duration of player injuries during the regular season, and (2) fielding abilities at secondary fielding positions. Next, we develop a robust optimization model to calculate the degradation in team performance due to injuries. Finally, we apply this model to measure the difference in performance between a team with players who have positional flexibility and a team that does not. We find that using 2012 rosters, flexibility was expected to create from 3% (White Sox) to 15% (Cubs) in value for each team, measured in runs above replacement. In analyzing the results, we find that platoon advantages (e.g., having left-handed batters face right-handed pitchers) form an important component of flexibility. As a secondary finding, based on our statistical analysis of injuries, we find that the likelihood of injury increases with age, but the duration of injury does not.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Drawing inspiration from the theory of production flexibility in manufacturing networks, we provide the first optimization-based analysis of the value of positional flexibility (the ability of a player to play multiple positions) for a major league baseball team in the presence of injury risk. First, we develop novel statistical models to estimate (1) the likelihood and duration of player injuries during the regular season, and (2) fielding abilities at secondary fielding positions. Next, we develop a robust optimization model to calculate the degradation in team performance due to injuries. Finally, we apply this model to measure the difference in performance between a team with players who have positional flexibility and a team that does not. We find that using 2012 rosters, flexibility was expected to create from 3% (White Sox) to 15% (Cubs) in value for each team, measured in runs above replacement. In analyzing the results, we find that platoon advantages (e.g., having left-handed batters face right-handed pitchers) form an important component of flexibility. As a secondary finding, based on our statistical analysis of injuries, we find that the likelihood of injury increases with age, but the duration of injury does not. |
4. | Examining the LEED rating system using approximate inverse optimization Inproceedings S. D. O. Turner, T. C. Y. Chan Proceedings of the ASME 2012 International Mechanical Engineering Congress and Exposition, 2012. @inproceedings{ChanTCY.Oth005, title = {Examining the LEED rating system using approximate inverse optimization}, author = {S. D. O. Turner and T. C. Y. Chan}, doi = {10.1115/IMECE2012-93116}, year = {2012}, date = {2012-11-01}, booktitle = {Proceedings of the ASME 2012 International Mechanical Engineering Congress and Exposition}, abstract = {The Leadership in Energy and Environmental Design (LEED) rating system is the most recognized green building certification program in North America. In order to be LEED certified, a building must earn a certain number of points, which are obtained through achieving certain credits or design elements. Prior to LEED version 3, each credit was worth one point. In this study, we develop an inverse optimization approach to examine how building designers intrinsically valued design elements in LEED version 2. Due to the large dimensionality of the inverse optimization problem, we develop an approximation to improve tractability. We apply our method to 18 different LEED-certified buildings in the United States. We find that building designers did not value all credits equally and that other factors such as cost and certification level play a role in how the credits are valued. Overall, inverse optimization may provide a new method to assess historical data and support the design of future versions of LEED.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The Leadership in Energy and Environmental Design (LEED) rating system is the most recognized green building certification program in North America. In order to be LEED certified, a building must earn a certain number of points, which are obtained through achieving certain credits or design elements. Prior to LEED version 3, each credit was worth one point. In this study, we develop an inverse optimization approach to examine how building designers intrinsically valued design elements in LEED version 2. Due to the large dimensionality of the inverse optimization problem, we develop an approximation to improve tractability. We apply our method to 18 different LEED-certified buildings in the United States. We find that building designers did not value all credits equally and that other factors such as cost and certification level play a role in how the credits are valued. Overall, inverse optimization may provide a new method to assess historical data and support the design of future versions of LEED. |
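The paper's approximate inverse model is not reproduced here, but a minimal "textbook" inverse LP conveys the idea: given a decision observed from the forward problem max c^T x s.t. Ax <= b, x >= 0, find normalized weights c (and duals y) that minimize the duality gap at the observation. The matrix A, vector b, and observed decision below are toy numbers, not LEED data.

```python
import numpy as np
from scipy.optimize import linprog

# Toy forward problem: a "building" picks credit levels x to max c^T x s.t. A x <= b, x >= 0.
A = np.array([[1.0, 2.0, 1.0],
              [3.0, 1.0, 2.0]])
b = np.array([10.0, 15.0])
x_obs = np.array([4.0, 3.0, 0.0])   # an observed, feasible decision

n, m = A.shape[1], A.shape[0]
# Decision variables of the inverse problem: [c (n weights), y (m duals)].
# Minimize the duality gap b^T y - c^T x_obs subject to dual feasibility A^T y >= c,
# the normalization sum(c) = 1, and c, y >= 0.
obj = np.concatenate([-x_obs, b])
A_ub = np.hstack([np.eye(n), -A.T])                      # c - A^T y <= 0
b_ub = np.zeros(n)
A_eq = np.concatenate([np.ones(n), np.zeros(m)]).reshape(1, -1)
b_eq = np.array([1.0])
res = linprog(obj, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
              bounds=[(0, None)] * (n + m))
c_hat = res.x[:n]
print("inferred weights:", np.round(c_hat, 3), " duality gap:", round(res.fun, 3))
```

A gap of zero means some normalized weight vector makes the observed design exactly optimal; a positive gap quantifies how far the observation is from being rationalizable, which is the kind of signal the paper uses to compare how designers valued different credits.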
3. | Split personalities of NHL players: using clustering, projection and regression to measure individual point shares Inproceedings T. C. Y. Chan, D. C. Novati Proceedings of the 2012 MIT Sloan Sports Analytics Conference, 2012. @inproceedings{ChanTCY.Oth004, title = {Split personalities of NHL players: using clustering, projection and regression to measure individual point shares}, author = {T. C. Y. Chan and D. C. Novati}, url = {http://www.sloansportsconference.com/wp-content/uploads/2012/02/59-Chan_Novati_Split-personalities-of-NHL-players.pdf}, year = {2012}, date = {2012-03-01}, booktitle = {Proceedings of the 2012 MIT Sloan Sports Analytics Conference}, abstract = {Recent literature in hockey analytics has considered the use of clustering to determine specific categories or types of NHL players. Regression analysis has then been used to measure the contribution of each of these player types to team performance. This paper uses a combination of clustering, projection and regression methods to individualize the classification of NHL players. Instead of assigning each player to only one type, the overall "personality" of the player is split into fractional components representing different player types. The result is a unique make-up for each player, which is used to quantify his individual contributions to his team's performance, a metric known as "point shares". Top ranked players in terms of point shares tend to be winners of major NHL awards, are leaders in scoring, and have the highest salaries. High point shares in a contract year may also factor into salary increases. Overall, a better understanding of individual NHL player characteristics may provide a foundation for deeper, data-driven player analysis.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Recent literature in hockey analytics has considered the use of clustering to determine specific categories or types of NHL players. Regression analysis has then been used to measure the contribution of each of these player types to team performance. This paper uses a combination of clustering, projection and regression methods to individualize the classification of NHL players. Instead of assigning each player to only one type, the overall "personality" of the player is split into fractional components representing different player types. The result is a unique make-up for each player, which is used to quantify his individual contributions to his team's performance, a metric known as "point shares". Top ranked players in terms of point shares tend to be winners of major NHL awards, are leaders in scoring, and have the highest salaries. High point shares in a contract year may also factor into salary increases. Overall, a better understanding of individual NHL player characteristics may provide a foundation for deeper, data-driven player analysis. |
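A hedged sketch of the pipeline the abstract outlines, on synthetic data: cluster players into types, soften the assignment into fractional memberships (a simple inverse-distance weighting stands in for the paper's projection step), regress team points on roster composition, and read off per-player point shares. The stat vectors, team assignments, and coefficients are all made up.

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(1)
n_players, n_teams, n_types = 150, 30, 5
stats = rng.normal(size=(n_players, 6))            # toy per-player stat vectors
team_of = rng.integers(0, n_teams, size=n_players)

# 1) Cluster players into "types", then soften the hard labels into fractional memberships.
km = KMeans(n_clusters=n_types, n_init=10, random_state=1).fit(stats)
dist = km.transform(stats)                          # distance of each player to each centroid
membership = 1.0 / (dist + 1e-9)
membership /= membership.sum(axis=1, keepdims=True)

# 2) Aggregate memberships by team and regress team points on roster composition.
composition = np.zeros((n_teams, n_types))
for p in range(n_players):
    composition[team_of[p]] += membership[p]
team_points = composition @ np.array([12, 8, 5, 3, 1]) + rng.normal(0, 2, n_teams)  # toy response
reg = LinearRegression().fit(composition, team_points)

# 3) A player's "point shares" = his fractional make-up weighted by the type coefficients.
point_shares = membership @ reg.coef_
print("largest individual point share:", round(point_shares.max(), 2))
```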
2. | Single and multi-agent exploration of a Markov decision process Inproceedings T. C. Y. Chan, E. Feron Proceedings of the 42nd Allerton Conference on Communication, Control, and Computing, 2004. @inproceedings{ChanTCY.Oth003, title = {Single and multi-agent exploration of a Markov decision process}, author = {T. C. Y. Chan and E. Feron}, year = {2004}, date = {2004-10-02}, booktitle = {Proceedings of the 42nd Allerton Conference on Communication, Control, and Computing}, abstract = {In this paper, we investigate the problem of efficiently visiting every state of a Birth-Death Markov Decision Process (BDP). In the single-agent case, we consider two different BDPs and derive their corresponding optimal policies - one being a pure greedy policy, and the other being a threshold policy based on greedy actions. We then generalize to a multi-agent setting and prove the optimality of a greedy policy in a related problem.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } In this paper, we investigate the problem of efficiently visiting every state of a Birth-Death Markov Decision Process (BDP). In the single-agent case, we consider two different BDPs and derive their corresponding optimal policies - one being a pure greedy policy, and the other being a threshold policy based on greedy actions. We then generalize to a multi-agent setting and prove the optimality of a greedy policy in a related problem. |
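A toy simulation of the single-agent setting, not the paper's formal model or optimality proof: a walker on a birth-death chain greedily moves toward the nearest unvisited state, with moves that succeed only with some probability, and we average the number of steps needed to visit every state. The chain length, success probability, and start state are illustrative.

```python
import random

def greedy_explore(n_states=20, p_success=0.8, start=10, seed=0, trials=2000):
    """Average steps for a greedy walker to visit every state of a birth-death chain.

    Each period the agent steps toward the nearest unvisited state; the step succeeds
    with probability p_success, otherwise the agent stays put.
    """
    random.seed(seed)
    total = 0
    for _ in range(trials):
        pos, visited, steps = start, {start}, 0
        while len(visited) < n_states:
            below = [s for s in range(pos) if s not in visited]
            above = [s for s in range(pos + 1, n_states) if s not in visited]
            if below and (not above or pos - max(below) <= min(above) - pos):
                direction = -1          # nearest unvisited state lies below
            else:
                direction = +1
            if random.random() < p_success:
                pos = min(max(pos + direction, 0), n_states - 1)
            visited.add(pos)
            steps += 1
        total += steps
    return total / trials

print(round(greedy_explore(), 1))
```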
1. | Development of an automated tracking system of tagged wild animals Inproceedings M. Ishutkina, T. Chan, E. Feron Proceedings of the Automation Technology for Off-Road Equipment Conference, 2004. @inproceedings{ChanTCY.Oth002, title = {Development of an automated tracking system of tagged wild animals}, author = {M. Ishutkina and T. Chan and E. Feron}, doi = {10.13031/2013.17827}, year = {2004}, date = {2004-10-01}, booktitle = {Proceedings of the Automation Technology for Off-Road Equipment Conference}, abstract = {The objective of this study is to develop a tracking algorithm which would allow a biologist to locate tagged animals using an unmanned aerial vehicle. The algorithm is developed to track the red wolves in a wildlife refuge in North Carolina. The red wolf is an endangered species and there are about a hundred of them living in the wild. For tracking purposes, each animal is outfitted with a collar containing a radio transmitter which emits a signal of a specific frequency over a given range. Using transmitter signal range, we discretize the refuge terrain map and develop a suboptimal greedy algorithm to search for the probabilistic targets striving to minimize the expected traveling time. To reduce the computational complexity, we consider a two-layered approach to the search problem. At the top level, the algorithm chooses the pack sectors as targets and, at the lower level, employs a local policy to travel between the discrete sensing locations. Using the animal behavior model provided by the wildlife biologists, we develop the probability rules to update the predicted target locations given the location of already found wolves. Finally, we present the results of a numerical simulation.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The objective of this study is to develop a tracking algorithm which would allow a biologist to locate tagged animals using an unmanned aerial vehicle. The algorithm is developed to track the red wolves in a wildlife refuge in North Carolina. The red wolf is an endangered species and there are about a hundred of them living in the wild. For tracking purposes, each animal is outfitted with a collar containing a radio transmitter which emits a signal of a specific frequency over a given range. Using transmitter signal range, we discretize the refuge terrain map and develop a suboptimal greedy algorithm to search for the probabilistic targets striving to minimize the expected traveling time. To reduce the computational complexity, we consider a two-layered approach to the search problem. At the top level, the algorithm chooses the pack sectors as targets and, at the lower level, employs a local policy to travel between the discrete sensing locations. Using the animal behavior model provided by the wildlife biologists, we develop the probability rules to update the predicted target locations given the location of already found wolves. Finally, we present the results of a numerical simulation. |
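A toy illustration of the lower-level greedy rule sketched in the abstract: choose the next sensing location by detection probability per unit travel time, and renormalize the belief after an empty scan. The cell coordinates, prior probabilities, speed, and the cell that actually holds the wolf are invented for the example.

```python
import math

# Discretized refuge: each cell has a prior probability that the tagged wolf is there.
cells = {(0, 0): 0.05, (0, 3): 0.20, (2, 1): 0.35, (4, 4): 0.25, (1, 4): 0.15}
pos = (0, 0)
speed = 1.0                     # grid units per minute
wolf_cell = (4, 4)              # hidden ground truth, used only to end the toy search
found_at = None

def travel_time(a, b):
    return math.dist(a, b) / speed

# Greedy rule: next cell maximizes detection probability per minute of travel.
remaining = dict(cells)
while remaining and found_at is None:
    nxt = max(remaining, key=lambda c: remaining[c] / (travel_time(pos, c) + 1.0))
    pos = nxt
    if nxt == wolf_cell:
        found_at = nxt
    else:
        # Empty scan: drop the cell's mass and renormalize the remaining belief.
        remaining.pop(nxt)
        total = sum(remaining.values())
        remaining = {c: p / total for c, p in remaining.items()}

print("found at", found_at)
```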
2. | Robust optimization methods Book Chapter T. C. Y. Chan, P. A. C. Mar Terlaky, T; Anjos, M; Ahmed, S (Ed.): Advances and Trends in Optimization with Engineering Applications, Chapter 25, pp. 333-344, SIAM, Philadelphia, 2017. @inbook{ChanTCY.Oth010, title = {Robust optimization methods}, author = {T. C. Y. Chan and P. A. C. Mar}, editor = {T. Terlaky and M. Anjos and S. Ahmed}, doi = {10.1137/1.9781611974683.ch25}, year = {2017}, date = {2017-02-01}, booktitle = {Advances and Trends in Optimization with Engineering Applications}, pages = {333-344}, publisher = {SIAM}, address = {Philadelphia}, chapter = {25}, abstract = {Uncertainty in the parameters of an optimization problem can have a large impact on the quality of the resulting solutions, even rendering such solutions infeasible if unexpected scenarios arise [212]. Robust optimization is one method for optimizing in the presence of uncertainty. The last 15 years have seen an explosion in activity in robust optimization driven by initial work done to explicitly reformulate several different optimization models under set-based uncertainty as tractable mathematical optimization problems. The success of robust optimization can be attributed to its computational tractability and its ability to produce quality solutions for a large class of problems. From a modeling viewpoint, robust optimization conveniently does not require complete knowledge of the probability distribution of the uncertain parameters.}, keywords = {}, pubstate = {published}, tppubtype = {inbook} } Uncertainty in the parameters of an optimization problem can have a large impact on the quality of the resulting solutions, even rendering such solutions infeasible if unexpected scenarios arise [212]. Robust optimization is one method for optimizing in the presence of uncertainty. The last 15 years have seen an explosion in activity in robust optimization driven by initial work done to explicitly reformulate several different optimization models under set-based uncertainty as tractable mathematical optimization problems. The success of robust optimization can be attributed to its computational tractability and its ability to produce quality solutions for a large class of problems. From a modeling viewpoint, robust optimization conveniently does not require complete knowledge of the probability distribution of the uncertain parameters. |
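As a small concrete illustration of the kind of reformulation this chapter surveys, here is a budget-of-uncertainty (Bertsimas-Sim-style) robust counterpart of a toy one-constraint LP, solved with scipy. The data are invented and the model is far simpler than anything treated in the chapter.

```python
import numpy as np
from scipy.optimize import linprog

# Toy problem: max c^T x s.t. a^T x <= b, 0 <= x <= 1, where each a_j is only known
# to lie in [a_bar_j - d_j, a_bar_j + d_j] and at most Gamma coefficients deviate.
c = np.array([5.0, 4.0, 3.0])
a_bar = np.array([2.0, 3.0, 1.0])
d = np.array([0.5, 1.0, 0.3])
b, Gamma = 4.0, 1.0

# Robust counterpart: auxiliary variables z (scalar) and p_j give
#   a_bar^T x + Gamma*z + sum_j p_j <= b,   z + p_j >= d_j * x_j,   z, p >= 0.
n = len(c)
obj = np.concatenate([-c, [0.0], np.zeros(n)])           # variables ordered [x, z, p]
A_ub = np.zeros((1 + n, 2 * n + 1))
A_ub[0, :n] = a_bar; A_ub[0, n] = Gamma; A_ub[0, n + 1:] = 1.0
for j in range(n):                                       # d_j*x_j - z - p_j <= 0
    A_ub[1 + j, j] = d[j]; A_ub[1 + j, n] = -1.0; A_ub[1 + j, n + 1 + j] = -1.0
b_ub = np.concatenate([[b], np.zeros(n)])
bounds = [(0, 1)] * n + [(0, None)] * (n + 1)
res = linprog(obj, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
print("robust x =", np.round(res.x[:n], 3), " objective =", round(-res.fun, 3))
```

The robust counterpart stays a linear program of essentially the same size as the nominal problem, which is the tractability point the chapter abstract emphasizes.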
1. | Robust wind farm layout optimization Book Chapter P. Y. Zhang, J. Y. J. Kuo, D. Romero, T. C. Y. Chan, C. H. Amon Terlaky, T; Anjos, M; Ahmed, S (Ed.): Advances and Trends in Optimization with Engineering Applications, Chapter 28, pp. 367-374, SIAM, Philadelphia, 2017. @inbook{ChanTCY.Oth009, title = {Robust wind farm layout optimization}, author = {P. Y. Zhang and J. Y. J. Kuo and D. Romero and T. C. Y. Chan and C. H. Amon}, editor = {T. Terlaky and M. Anjos and S. Ahmed}, doi = {10.1137/1.9781611974683.ch28}, year = {2017}, date = {2017-01-01}, booktitle = {Advances and Trends in Optimization with Engineering Applications}, pages = {367-374}, publisher = {SIAM}, address = {Philadelphia}, chapter = {28}, abstract = {The number of commercial-scale wind farms and total installed capacity have grown dramatically over the past two decades [1958]. Relying on an essentially free but intermittent energy source and spanning large areas of land or water, wind farms face unique challenges in their design, installation, and operations. Two types of optimization problems arise naturally from this context: (a) optimization of aggregated energy output by strategic turbine placement and (b) optimization of electrical dispatch policies to reduce the economic impact of wind intermittency. In this chapter, we apply robust optimization to the former and discuss the potential benefits on both total energy output and variability in energy output, which can impact dispatch policies.}, keywords = {}, pubstate = {published}, tppubtype = {inbook} } The number of commercial-scale wind farms and total installed capacity have grown dramatically over the past two decades [1958]. Relying on an essentially free but intermittent energy source and spanning large areas of land or water, wind farms face unique challenges in their design, installation, and operations. Two types of optimization problems arise naturally from this context: (a) optimization of aggregated energy output by strategic turbine placement and (b) optimization of electrical dispatch policies to reduce the economic impact of wind intermittency. In this chapter, we apply robust optimization to the former and discuss the potential benefits on both total energy output and variability in energy output, which can impact dispatch policies. |
1. | Optimization under uncertainty in radiation therapy PhD Thesis T. C. Y. Chan Sloan School of Management, MIT, 2007. @phdthesis{Chan2007, title = {Optimization under uncertainty in radiation therapy}, author = {T. C. Y. Chan}, url = {https://dspace.mit.edu/handle/1721.1/40302}, year = {2007}, date = {2007-06-01}, address = {Operations Research Center}, school = {Sloan School of Management, MIT}, abstract = {In the context of patient care for life-threatening illnesses, the presence of uncertainty may compromise the quality of a treatment. In this thesis, we investigate robust approaches to managing uncertainty in radiation therapy treatments for cancer. In the first part of the thesis, we study the effect of breathing motion uncertainty on intensity-modulated radiation therapy treatments of a lung tumor. We construct a robust framework that generalizes current mathematical programming formulations that account for motion. This framework gives insight into the trade-off between sparing the healthy tissues and ensuring that the tumor receives sufficient dose. With this trade-off in mind, we show that our robust solution outperforms a nominal (no uncertainty) solution and a margin (worst-case) solution on a clinical case. Next, we perform an in-depth study into the structure of different intensity maps that were witnessed in the first part of the thesis. We consider parameterized intensity maps and investigate their ability to deliver a sufficient dose to the tumor in the presence of motion that follows a Gaussian distribution. We characterize the structure of optimal intensity maps in terms of certain conditions on the problem parameters. Finally, in the last part of the thesis, we study intensity-modulated proton therapy under uncertainty in the location of maximum dose deposited by the beamlets of radiation. We provide a robust formulation for the optimization of proton-based treatments and show that it outperforms traditional formulations in the face of uncertainty. In our computational experiments, we see evidence that optimal robust solutions use the physical characteristics of the proton beam to create dose distributions that are far less sensitive to the underlying uncertainty.}, keywords = {}, pubstate = {published}, tppubtype = {phdthesis} } In the context of patient care for life-threatening illnesses, the presence of uncertainty may compromise the quality of a treatment. In this thesis, we investigate robust approaches to managing uncertainty in radiation therapy treatments for cancer. In the first part of the thesis, we study the effect of breathing motion uncertainty on intensity-modulated radiation therapy treatments of a lung tumor. We construct a robust framework that generalizes current mathematical programming formulations that account for motion. This framework gives insight into the trade-off between sparing the healthy tissues and ensuring that the tumor receives sufficient dose. With this trade-off in mind, we show that our robust solution outperforms a nominal (no uncertainty) solution and a margin (worst-case) solution on a clinical case. Next, we perform an in-depth study into the structure of different intensity maps that were witnessed in the first part of the thesis. We consider parameterized intensity maps and investigate their ability to deliver a sufficient dose to the tumor in the presence of motion that follows a Gaussian distribution. We characterize the structure of optimal intensity maps in terms of certain conditions on the problem parameters. 
Finally, in the last part of the thesis, we study intensity-modulated proton therapy under uncertainty in the location of maximum dose deposited by the beamlets of radiation. We provide a robust formulation for the optimization of proton-based treatments and show that it outperforms traditional formulations in the face of uncertainty. In our computational experiments, we see evidence that optimal robust solutions use the physical characteristics of the proton beam to create dose distributions that are far less sensitive to the underlying uncertainty. |
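A minimal LP sketch, under simplifying assumptions, of the robust idea described in the first part of the thesis: require the prescribed tumor dose to be met for every breathing-motion PDF in a small uncertainty set while minimizing total fluence as a proxy for healthy-tissue dose. The dose-deposition matrices, the motion PDFs, and the prescription level below are random toy data, not the thesis formulation.

```python
import numpy as np
from scipy.optimize import linprog

rng = np.random.default_rng(2)
n_beamlets, n_voxels, n_phases = 6, 4, 3
# Toy dose deposition: D[k][v, i] = dose to tumor voxel v from unit weight on beamlet i in phase k.
D = rng.uniform(0.2, 1.0, size=(n_phases, n_voxels, n_beamlets))
prescription = 1.0

# A small uncertainty set of breathing-motion PDFs over the phases.
pdfs = [np.array([0.6, 0.3, 0.1]),
        np.array([0.3, 0.4, 0.3]),
        np.array([0.1, 0.3, 0.6])]

# Robust LP: minimize total fluence s.t. every PDF in the set delivers the prescription.
A_ub, b_ub = [], []
for p in pdfs:
    D_p = np.tensordot(p, D, axes=1)            # expected dose matrix under this PDF
    A_ub.append(-D_p)                           # -D_p w <= -prescription  <=>  D_p w >= prescription
    b_ub.append(-prescription * np.ones(n_voxels))
res = linprog(np.ones(n_beamlets), A_ub=np.vstack(A_ub), b_ub=np.concatenate(b_ub),
              bounds=[(0, None)] * n_beamlets)
print("robust fluence:", np.round(res.x, 3), " total fluence:", round(res.fun, 3))
```

Solving the same LP with only the nominal PDF and then evaluating the plan under the other PDFs shows the trade-off the thesis studies: the nominal plan uses less fluence but can underdose the tumor when the realized motion differs from the assumed one.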